From 3acf9ce11fde57e31e24364309e77ebf40b5373e Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Fri, 10 May 2024 14:29:29 -0700
Subject: [PATCH] Retire Senlin: remove repo content

The Senlin project is retiring:
https://review.opendev.org/c/openstack/governance/+/919347

This commit removes the content of this project repo.

Depends-On: https://review.opendev.org/c/openstack/project-config/+/919348/
Change-Id: I5f524835683b10a8472ee90e5de5087b37ff502b
---
 .coveragerc | 9 -
 .gitignore | 30 -
 .stestr.conf | 3 -
 .zuul.yaml | 180 --
 CONTRIBUTING.rst | 91 -
 FEATURES.rst | 284 --
 HACKING.rst | 57 -
 LICENSE | 176 --
 README.rst | 102 +-
 TODO.rst | 98 -
 api-ref/source/actions.inc | 188 --
 api-ref/source/build_info.inc | 50 -
 api-ref/source/cluster_policies.inc | 113 -
 api-ref/source/clusters.inc | 1158 -------
 api-ref/source/conf.py | 205 --
 api-ref/source/events.inc | 129 -
 api-ref/source/index.rst | 22 -
 api-ref/source/nodes.inc | 654 ----
 api-ref/source/parameters.yaml | 1546 ----------
 api-ref/source/policies.inc | 358 ---
 api-ref/source/policy_types.inc | 111 -
 api-ref/source/profile_types.inc | 156 -
 api-ref/source/profiles.inc | 356 ---
 api-ref/source/receivers.inc | 360 ---
 .../source/samples/action-get-request.json | 5 -
 .../source/samples/action-get-response.json | 25 -
 .../source/samples/actions-list-response.json | 50 -
 .../source/samples/build-show-response.json | 10 -
 .../samples/cluster-action-response.json | 3 -
 .../samples/cluster-add-nodes-request.json | 8 -
 .../cluster-attach-policy-request.json | 6 -
 .../samples/cluster-attrs-list-response.json | 16 -
 .../source/samples/cluster-check-request.json | 3 -
 .../cluster-complete-lifecycle-request.json | 5 -
 .../samples/cluster-create-request.json | 12 -
 .../samples/cluster-create-response.json | 26 -
 .../samples/cluster-del-nodes-request.json | 9 -
 .../cluster-detach-policy-request.json | 5 -
 .../source/samples/cluster-list-response.json | 26 -
 .../samples/cluster-operation-request.json | 10 -
 .../cluster-policies-list-response.json | 22 -
 .../samples/cluster-policy-show-response.json | 11 -
 .../samples/cluster-recover-request.json | 9 -
 .../cluster-replace-nodes-request.json | 7 -
 .../samples/cluster-resize-request.json | 9 -
 .../samples/cluster-scale-in-request.json | 5 -
 .../samples/cluster-scale-out-request.json | 5 -
 .../source/samples/cluster-show-response.json | 26 -
 .../cluster-update-policy-request.json | 6 -
 .../samples/cluster-update-request.json | 9 -
 .../samples/cluster-update-response.json | 26 -
 .../samples/clusters-list-response.json | 32 -
 .../source/samples/event-show-response.json | 18 -
 .../source/samples/events-list-response.json | 20 -
 .../source/samples/node-action-response.json | 3 -
 .../samples/node-adopt-preview-request.json | 6 -
 .../samples/node-adopt-preview-response.json | 20 -
 .../source/samples/node-adopt-request.json | 9 -
 .../source/samples/node-adopt-response.json | 22 -
 .../source/samples/node-check-request.json | 3 -
 .../source/samples/node-create-request.json | 9 -
 .../source/samples/node-create-response.json | 23 -
 .../source/samples/node-list-response.json | 25 -
 .../samples/node-operation-request.json | 5 -
 .../source/samples/node-recover-request.json | 9 -
 .../source/samples/node-show-response.json | 23 -
 .../source/samples/node-update-request.json | 5 -
 .../source/samples/policy-create-request.json | 17 -
 .../samples/policy-create-response.json | 26 -
 .../source/samples/policy-list-response.json | 26 -
 .../source/samples/policy-show-response.json | 26 -
 .../policy-type-show-response-v1.5.json | 58 -
 .../samples/policy-type-show-response.json | 50 -
 .../policy-types-list-response-v1.5.json | 56 -
 .../samples/policy-types-list-response.json | 28 -
 .../source/samples/policy-update-request.json | 5 -
 .../samples/policy-update-response.json | 24 -
 .../samples/policy-validate-request.json | 16 -
 .../samples/policy-validate-response.json | 25 -
 .../samples/profile-create-request.json | 24 -
 .../samples/profile-create-response.json | 31 -
 .../source/samples/profile-list-response.json | 30 -
 .../source/samples/profile-show-response.json | 28 -
 .../samples/profile-type-ops-response.json | 24 -
 .../profile-type-show-response-v1.5.json | 70 -
 .../samples/profile-type-show-response.json | 62 -
 .../profile-types-list-response-v1.5.json | 40 -
 .../samples/profile-types-list-response.json | 13 -
 .../samples/profile-update-request.json | 6 -
 .../samples/profile-update-response.json | 30 -
 .../samples/profile-validate-request.json | 22 -
 .../samples/profile-validate-response.json | 31 -
 .../samples/receiver-create-request.json | 11 -
 .../samples/receiver-create-response.json | 25 -
 .../samples/receiver-show-response.json | 25 -
 .../samples/receiver-update-request.json | 9 -
 .../samples/receiver-update-response.json | 25 -
 .../samples/receivers-list-response.json | 27 -
 .../samples/services-list-response.json | 14 -
 .../source/samples/version-show-response.json | 25 -
 .../samples/versions-list-response.json | 27 -
 .../samples/webhook-action-response.json | 3 -
 api-ref/source/services.inc | 60 -
 api-ref/source/status.yaml | 61 -
 api-ref/source/versions.inc | 101 -
 api-ref/source/webhooks.inc | 55 -
 bindep.txt | 2 -
 contrib/kubernetes/README.rst | 99 -
 contrib/kubernetes/TODO.rst | 13 -
 contrib/kubernetes/examples/kubemaster.yaml | 7 -
 contrib/kubernetes/examples/kubenode.yaml | 7 -
 contrib/kubernetes/kube/__init__.py | 0
 contrib/kubernetes/kube/base.py | 275 --
 contrib/kubernetes/kube/master.py | 279 --
 contrib/kubernetes/kube/scripts/master.sh | 33 -
 contrib/kubernetes/kube/scripts/worker.sh | 10 -
 contrib/kubernetes/kube/worker.py | 353 ---
 contrib/kubernetes/requirements.txt | 1 -
 contrib/kubernetes/setup.cfg | 28 -
 contrib/kubernetes/setup.py | 27 -
 contrib/vdu/README.rst | 13 -
 contrib/vdu/examples/vdu.yaml | 17 -
 contrib/vdu/requirements.txt | 1 -
 contrib/vdu/setup.cfg | 31 -
 contrib/vdu/setup.py | 30 -
 contrib/vdu/vdu/__init__.py | 0
 contrib/vdu/vdu/server.py | 1469 ---------
 devstack/README.rst | 21 -
 devstack/files/apache-senlin-api.template | 28 -
 devstack/lib/senlin | 329 --
 devstack/plugin.sh | 58 -
 devstack/settings | 6 -
 doc/.gitignore | 2 -
 doc/Makefile | 159 -
 doc/README.rst | 55 -
 doc/requirements.txt | 8 -
 doc/source/admin/authentication.rst | 21 -
 doc/source/admin/index.rst | 21 -
 doc/source/conf.py | 109 -
 doc/source/configuration/config.rst | 30 -
 doc/source/configuration/index.rst | 24 -
 doc/source/configuration/policy.rst | 31 -
 .../configuration/sample-policy-yaml.rst | 30 -
 doc/source/contributor/action.rst | 317 --
 doc/source/contributor/api_microversion.rst | 374 ---
 doc/source/contributor/authorization.rst | 191 --
 doc/source/contributor/cluster.rst | 624 ----
 doc/source/contributor/event_dispatcher.rst | 125 -
 doc/source/contributor/node.rst | 202 --
 doc/source/contributor/osprofiler.rst | 68 -
 doc/source/contributor/plugin_guide.rst | 29 -
 .../contributor/policies/affinity_v1.rst | 231 --
 .../contributor/policies/deletion_v1.rst | 272 --
 doc/source/contributor/policies/health_v1.rst | 373 ---
 .../contributor/policies/load_balance_v1.rst | 258 --
 doc/source/contributor/policies/region_v1.rst | 232 --
 .../contributor/policies/scaling_v1.rst | 149 -
 doc/source/contributor/policies/zone_v1.rst | 235 --
 doc/source/contributor/policy.rst | 146 -
 doc/source/contributor/policy_type.rst | 293 --
 doc/source/contributor/profile.rst | 149 -
 doc/source/contributor/profile_type.rst | 271 --
 doc/source/contributor/receiver.rst | 226 --
 doc/source/contributor/reviews.rst | 42 -
 doc/source/contributor/testing.rst | 338 --
 doc/source/ext/__init__.py | 0
 doc/source/ext/resources.py | 291 --
 doc/source/index.rst | 226 --
 doc/source/install/index.rst | 28 -
 doc/source/install/install-devstack.rst | 45 -
 doc/source/install/install-rdo.rst | 246 --
 doc/source/install/install-source.rst | 145 -
 doc/source/install/verify.rst | 50 -
 doc/source/overview.rst | 80 -
 doc/source/reference/api.rst | 24 -
 doc/source/reference/glossary.rst | 146 -
 doc/source/reference/man/index.rst | 25 -
 doc/source/reference/man/senlin-api.rst | 51 -
 doc/source/reference/man/senlin-conductor.rst | 47 -
 doc/source/reference/man/senlin-engine.rst | 48 -
 .../reference/man/senlin-health-manager.rst | 48 -
 doc/source/reference/man/senlin-manage.rst | 98 -
 doc/source/reference/man/senlin-status.rst | 78 -
 doc/source/scenarios/affinity.rst | 119 -
 .../scenarios/autoscaling_ceilometer.rst | 282 --
 doc/source/scenarios/autoscaling_heat.rst | 251 --
 doc/source/scenarios/autoscaling_overview.rst | 55 -
 doc/source/scenarios/ex_lbas.yaml | 167 -
 doc/source/tutorial/autoscaling.rst | 172 --
 doc/source/tutorial/basics.rst | 179 --
 doc/source/tutorial/policies.rst | 91 -
 doc/source/tutorial/receivers.rst | 88 -
 doc/source/user/actions.rst | 184 --
 doc/source/user/bindings.rst | 174 --
 doc/source/user/clusters.rst | 503 ---
 doc/source/user/events.rst | 226 --
 doc/source/user/membership.rst | 165 -
 doc/source/user/nodes.rst | 487 ---
 doc/source/user/policies.rst | 246 --
 doc/source/user/policy_types.rst | 208 --
 doc/source/user/policy_types/affinity.rst | 134 -
 doc/source/user/policy_types/batch.rst | 52 -
 doc/source/user/policy_types/deletion.rst | 183 --
 doc/source/user/policy_types/health.rst | 128 -
 .../user/policy_types/load_balancing.rst | 295 --
 .../user/policy_types/region_placement.rst | 92 -
 doc/source/user/policy_types/scaling.rst | 158 -
 .../user/policy_types/zone_placement.rst | 85 -
 doc/source/user/profile_types.rst | 225 --
 doc/source/user/profile_types/docker.rst | 35 -
 doc/source/user/profile_types/nova.rst | 35 -
 doc/source/user/profile_types/stack.rst | 35 -
 doc/source/user/profiles.rst | 426 ---
 doc/source/user/receivers.rst | 185 --
 doc/specs/README.rst | 27 -
 doc/specs/approved/README.rst | 3 -
 doc/specs/approved/container-cluster.rst | 176 --
 doc/specs/approved/generic-event.rst | 256 --
 doc/specs/cluster-fast-scaling.rst | 159 -
 doc/specs/fail-fast-on-locked_resource.rst | 257 --
 doc/specs/lifecycle-hook.rst | 390 ---
 doc/specs/multiple-detection-modes.rst | 317 --
 doc/specs/rejected/README.rst | 2 -
 doc/specs/template.rst | 363 ---
 doc/specs/workflow-recover.rst | 172 --
 etc/senlin/README-senlin.conf.txt | 4 -
 etc/senlin/api-paste.ini | 48 -
 examples/policies/WIP/batching_1_1_0.yaml | 13 -
 examples/policies/WIP/health_policy_lb.yaml | 51 -
 examples/policies/WIP/lb_policy_aws.spec | 21 -
 examples/policies/affinity_policy.yaml | 8 -
 examples/policies/batch_policy.yaml | 15 -
 examples/policies/deletion_policy.yaml | 19 -
 .../deletion_policy_lifecycle_hook.yaml | 14 -
 examples/policies/health_policy_event.yaml | 17 -
 examples/policies/health_policy_poll.yaml | 20 -
 examples/policies/health_policy_poll_url.yaml | 19 -
 examples/policies/lb_policy.yaml | 80 -
 examples/policies/placement_region.yaml | 12 -
 examples/policies/placement_zone.yaml | 10 -
 examples/policies/scaling_policy.yaml | 26 -
 examples/profiles/README.rst | 28 -
 .../docker_container/docker_basic.yaml | 11 -
 .../nova_server/heat_stack_nova_server.yaml | 9 -
 .../nova_server/nova_server_template.yaml | 56 -
 .../heat_stack_random_string.yaml | 7 -
 .../random_string/random_string_template.yaml | 13 -
 .../profiles/nova_server/cirros_basic.yaml | 14 -
 install.sh | 121 -
 releasenotes/notes/.placeholder | 0
 ...o-alembic-migrations-f442d0b58c3f13a6.yaml | 4 -
 ...d-for-SQLAlchemy-2.x-ee6831e5a95d3658.yaml | 4 -
 ...ontrol-admin-project-762c8e91e8875738.yaml | 4 -
 ...-policy-optimization-06ea45eb3dcbe33a.yaml | 4 -
 .../notes/action-purge-11db5d8018b8389a.yaml | 4 -
 .../action-update-api-fc51b1582c0b5902.yaml | 7 -
 .../add-action-filter-40e775a26082f780.yaml | 6 -
 ...tion-to-loadbalancer-74b512fb0c138bfe.yaml | 5 -
 .../affinity-policy-fix-72ae92dc8ffcff00.yaml | 3 -
 .../notes/api-ref-fixes-19bc963430c32ecf.yaml | 4 -
 .../notes/az-info-9344b8d54c0b2665.yaml | 4 -
 .../batch-scheduling-ca5d98d41fc72973.yaml | 6 -
 .../notes/bdmv2-fix-b9ff742cdc282087.yaml | 4 -
 .../notes/bug-1789488-75ee756a53722cd1.yaml | 6 -
 .../notes/bug-1811161-c6416ad27ab0a2ce.yaml | 6 -
 .../notes/bug-1811294-262d4b9cced3f505.yaml | 6 -
 .../notes/bug-1813089-db57e7bdfd3983ac.yaml | 7 -
 .../notes/bug-1815540-2664a975db5fafc8.yaml | 6 -
 .../notes/bug-1817379-23dd2c925259d5f2.yaml | 5 -
 .../notes/bug-1817604-41d4b8f6c6f920e4.yaml | 9 -
 .../notes/bug-1828856-bf7a30a6eb00238a.yaml | 12 -
 .../notes/bug-2048099-74f0ca874cfbe6b4.yaml | 6 -
 .../notes/bug-2048100-6b4156df956a6f14.yaml | 6 -
 .../notes/bug-2048452-8a690353815601a0.yaml | 9 -
 .../notes/bug-2048726-a830a7838661a41f.yaml | 6 -
 .../notes/bug-2049191-8ee2d8352b05cfef.yaml | 8 -
 ...capacity-calculation-4fd389ff12107dfb.yaml | 6 -
 ...ons-for-cluster-node-438ca5268e7fd258.yaml | 5 -
 ...ster-action-refresh-9eeb60f1f2c1d0abr.yaml | 5 -
 ...uster-check-interval-b01e8140cc83760e.yaml | 5 -
 .../cluster-collect-90e460c7bfede347.yaml | 3 -
 ...ster-delete-conflict-94261706eb29e9bb.yaml | 5 -
 ...r-delete-with-policy-d2dca161e42ee6ba.yaml | 8 -
 ...ter-desired-capacity-d876347f69b04b4f.yaml | 6 -
 .../notes/cluster-lock-e283fb9bf1002bca.yaml | 3 -
 ...ster-node-dependents-3bdbebd773d276d1.yaml | 4 -
 .../cluster-node-status-e7fced162b415452.yaml | 3 -
 .../notes/cluster-ops-433a5aa608a0eb7f.yaml | 5 -
 .../cluster-recover-d87d429873b376db.yaml | 5 -
 .../cluster-resize-fix-bee18840a98907d8.yaml | 4 -
 ...cale-action-conflict-0e1e64591e943e25.yaml | 22 -
 ...luster-status-update-dd9133092aef05ab.yaml | 5 -
 ...ute-instance-fencing-63b931cdf35b127c.yaml | 4 -
 ...default-nova-timeout-f0bd73811ac3a8bb.yaml | 4 -
 .../notes/config-doc-cb8b37e360422301.yaml | 4 -
 ...er-thread-pool-size-de608624a6cb4b43r.yaml | 4 -
 ...p-node-before-delete-4ab08e61b40e4474.yaml | 4 -
 .../config-trust-roles-416e26e03036ae40.yaml | 5 -
 .../notes/container-ops-e57d096742202206.yaml | 4 -
 .../container-profile-152bf2908c70ffad.yaml | 5 -
 .../db-action-retries-d471fe85b4510afd.yaml | 4 -
 ...ject_safe-for-admins-2986f15e74cd1d1c.yaml | 11 -
 .../db-locking-logic-9c97b04ce8c52989.yaml | 4 -
 .../notes/db-retries-da4a0d9d83ad56bb.yaml | 5 -
 .../notes/delete-batch-a16ee5ed2512eab7.yaml | 6 -
 ...lete_with_dependants-823c6c4921f22575.yaml | 5 -
 .../deletion-policy-11bcb7c0e90bbfcc.yaml | 5 -
 ...n-policy-node-delete-dc70da377b2a4f77.yaml | 4 -
 ...ormatted-policy-file-0c29555b3ea0c984.yaml | 20 -
 ...y-nodes-after-remove-37bffdc35a9b7a96.yaml | 4 -
 .../notes/doc-fixes-0783e8120b61299br.yaml | 3 -
 .../notes/doc-fixes-5057bf93464810cc.yaml | 10 -
 .../notes/doc-fixes-685c64d1ef509041.yaml | 4 -
 .../notes/doc-fixes-cd8c7006f8c66387.yaml | 3 -
 .../notes/doc-fixes-e60bb1a486f67e0c.yaml | 4 -
 .../notes/docker-reboot-999ec624186864e3.yaml | 4 -
 .../notes/docker-start-c850c256c6149f4f.yaml | 4 -
 .../notes/docker-update-1b465241ca78873c.yaml | 4 -
 .../notes/drop-py-2-7-154eeefdc9886091.yaml | 6 -
 .../drop-py34-support-21e20efb9bf0b326.yaml | 4 -
 ...p-python-3-6-and-3-7-3a90d172a5e43660.yaml | 5 -
 .../notes/dynamic-timer-67f053499f4b32e2.yaml | 4 -
 ...nforce-multi-tenancy-ee27b9bfec7ba405.yaml | 4 -
 .../error-messages-bd8b5a6d12e2c4af.yaml | 4 -
 ...-for-derived-actions-8bd44367fa683dbc.yaml | 5 -
 .../notes/event-list-b268bb778efa9ee1.yaml | 5 -
 .../event-notification-eda06b43ce17a081.yaml | 6 -
 .../notes/event-purge-db868a063e18eafb.yaml | 4 -
 .../event-table-change-dcb42c8b6d145fec.yaml | 4 -
 ...t-on-locked-resource-eee28572dc40009a.yaml | 15 -
 ...ix-action-triggering-e880b02234028315.yaml | 5 -
 ...fix-aodh-integration-41e69276158ad233.yaml | 7 -
 .../fix-cluster-index-ae0060b6337d6d55.yaml | 3 -
 .../notes/fix-cooldown-5082711989ecd536.yaml | 4 -
 .../fix-db-deadlock-1d2bdb9ce785734a.yaml | 4 -
 .../fix-delete-apis-bf9f47b5fcf8f3e6.yaml | 4 -
 ...ix-delete-node-error-31575d62bc9375ec.yaml | 3 -
 ...desired-when-omitted-e7ffc0aa72ab8cc9.yaml | 6 -
 ...x-dup-of-action-dump-0b95a07adf3ccdba.yaml | 4 -
 .../fix-health-check-5d77795885676661.yaml | 3 -
 ...health-cluster-check-5ce1c0309c03c5d5.yaml | 5 -
 .../fix-health-mgr-opts-99898614f37c5d74.yaml | 4 -
 ...x-health-policy-bind-9b6ed0e51939eac3.yaml | 3 -
 ...twork-error-handling-e78da90b6bc2319c.yaml | 3 -
 .../fix-node-get-detail-4e6d30c3a6b2ce60.yaml | 5 -
 .../notes/fix-node-leak-9b1c08342a52542d.yaml | 5 -
 .../fix-node-recover-5af129bf0688577d.yaml | 4 -
 ...x-node-status-for-lb-fc7714da09bec2fb.yaml | 6 -
 ...stacksdk -exception-b762e649bfab4b31r.yaml | 7 -
 ...-policy-type-version-939a1fb4e84908f9.yaml | 6 -
 ...x-port-id-parameter-de4679438a891a67r.yaml | 4 -
 .../fix-recover-trigger-749600f500f7bf4a.yaml | 5 -
 .../fix-registry-claim-5421dca1ed9b0783.yaml | 5 -
 ...group-with-same-name-887487416f4525a1.yaml | 4 -
 .../fix-tag-for-stacks-2ef70be061e80253.yaml | 5 -
 .../fix-tox-cover-9fc01b5e0594aa19r.yaml | 4 -
 ...fix-update-lb-policy-0af6e8866f3b5543.yaml | 7 -
 ...bid-cluster-deletion-a8b0f55aaf0aa106.yaml | 4 -
 .../notes/force-delete-0b185ea6d70ed81e.yaml | 3 -
 .../gc-for-dead-engine-2246c714edc9a2df.yaml | 6 -
 .../health-add-cleanup-2d5143ec2bb78e55.yaml | 3 -
 ...ealth-check-interval-b3850c072600bfdf.yaml | 5 -
 .../health-lb-polling-32d83803c77cc1d8.yaml | 4 -
 ...health-manager-fixes-d5955f9af88102fc.yaml | 9 -
 ...lth-manager-listener-8ddbe169e510031b.yaml | 5 -
 ...ealth-policy-actions-936db8bc3ed08aec.yaml | 7 -
 ...iple-detection-types-10bfdc80771278cb.yaml | 17 -
 ...th-policy-properties-056d5b4aa63312c9.yaml | 3 -
 ...ealth-policy-suspend-7aa33fc981c0f2c9.yaml | 5 -
 .../health-poll-url-236392171bb28b3f.yaml | 8 -
 ...-poll-url-detection-c6f10065a076510dr.yaml | 5 -
 .../notes/health-reboot-9f74c263f7fb6767.yaml | 3 -
 .../health-recover-9aecfbf2d799abfb.yaml | 4 -
 .../notes/heat-listener-b908d0988840e1f3.yaml | 4 -
 ...keystone-conformance-4e729da9e88b4fb3.yaml | 5 -
 .../kube-token-gen-673ea5c0d26d6872.yaml | 3 -
 ...ubernetes-dependents-1d7a70aa43ee8aa4.yaml | 5 -
 .../lb-name-instead-id-f30d4f4e05d350cb.yaml | 4 -
 .../lb-node-actions-95545338ae622f5c.yaml | 5 -
 .../notes/lb-policy-02782a1b98142742.yaml | 5 -
 .../lb-policy-improve-165680731fb76681.yaml | 5 -
 ...b-policy-improvement-2c18577717d28bb5.yaml | 5 -
 ...-project-restriction-688833a1aec6f04e.yaml | 4 -
 ...b-support-to-recover-8f822d3c2665e225.yaml | 5 -
 .../lb-timeout-option-990ba1f359b5daab.yaml | 4 -
 .../lifecycle-hook-19a9bf85b534107d.yaml | 5 -
 ...loadbalancer-octavia-8ab8be9f703781d1.yaml | 6 -
 ...eak-for-dead-service-0abd3d3ea333622c.yaml | 4 -
 .../notes/lock-retry-4d1c52ff4d42a3f9.yaml | 3 -
 .../notes/lock-retry-ab31681e74997cf9.yaml | 6 -
 .../message-receiver-3432826515f8e70c.yaml | 4 -
 .../notes/message-topic-7c642cff317f2bc7.yaml | 4 -
 ...tadata-query-profile-9c45d99db7b30207.yaml | 4 -
 ...re-policy-validation-ace6a4f890b2a500.yaml | 4 -
 ...re-server-operations-dd77e83b705c28f0.yaml | 4 -
 .../notes/new-api-doc-f21eb0a9f53d7643.yaml | 4 -
 .../new-config-options-a963e5841d35ef03.yaml | 14 -
 .../new-node-create-08fe53674b0baab2.yaml | 4 -
 .../node-action-logic-4d3e94818cccaa3e.yaml | 5 -
 .../notes/node-adopt-289a3cea24d8eb78.yaml | 5 -
 .../notes/node-check-50d4b67796e17afb.yaml | 5 -
 ...check-before-recover-abf887a39ab0d355.yaml | 6 -
 ...node-create-affinity-ec126ccd3e9e0957.yaml | 4 -
 .../node-create-az-d886dea98a25229f.yaml | 4 -
 .../node-create-region-0cbac0918c703e27.yaml | 4 -
 .../node-delete-force-e4a69831af0b145d.yaml | 3 -
 .../node-detail-volumes-8e29c734f4f43442.yaml | 3 -
 .../node-health-check-0c94b9fecf35e677.yaml | 3 -
 .../node-join-leave-8b00f64cf55b675a.yaml | 3 -
 .../node-name-formatter-284b768be7fbe6c6.yaml | 6 -
 .../notes/node-op-api-a7bede34c51854ee.yaml | 4 -
 ...node-op-return-value-73720cf91b6e2672.yaml | 4 -
 .../notes/node-ops-115d9d64f6e261db.yaml | 4 -
 .../node-physical-id-f3393fb1a1eba4f7.yaml | 5 -
 .../notes/node-recover-ace5311e23030f20.yaml | 8 -
 .../node-recover-fix-cc054c3f763654a0.yaml | 3 -
 .../notes/node-role-fix-211d1536dd66066d.yaml | 4 -
 .../notes/node-tainted-1d1c0f885cd3e4a8.yaml | 5 -
 ...ode-update-timestamp-43b9639e22267598.yaml | 4 -
 ...on-operation-recover-cf0f3c0ac62bb0f3.yaml | 5 -
 ...ification-operations-c7bdaa9b56e5011f.yaml | 4 -
 ...fication-retry-logic-cb9933b4826c9d45.yaml | 4 -
 ...notification-support-a7e2ebc816bb4009.yaml | 8 -
 ...tification-transport-ae49e9cb1813cd96.yaml | 4 -
 .../notes/nova-az-fccf8db758642d34.yaml | 5 -
 .../nova-get-image-726aa195c17a294f.yaml | 4 -
 .../nova-metadata-fix-89b7a2e06c3ce59f.yaml | 3 -
 ...nova-metadata-update-d1ab297f0e998117.yaml | 5 -
 ...ova-server-addresses-fd8afddc3fb36a0c.yaml | 5 -
 ...va-server-validation-60612c1185738104.yaml | 4 -
 ...va-server-validation-d36dbcf64fb90a43.yaml | 5 -
 .../nova-update-opt-7372e4d189e483aa.yaml | 4 -
 ...va-update-validation-dca7de984c2071d1.yaml | 4 -
 .../notes/ocata-2-c2e184a0b76231e8.yaml | 8 -
 ...nd-subnet_id-changes-9ba43e19ae29ac7d.yaml | 5 -
 .../options-shuffled-29c6cfac72aaf8ff.yaml | 5 -
 ...ioned-object-support-cc9463490306c26f.yaml | 4 -
 ...check-cluster-update-58d4712a33f74c6e.yaml | 4 -
 .../path-check-collect-1e542762cbcd65d2.yaml | 3 -
 .../policy-enabling-61d0c38aecf314eb.yaml | 5 -
 .../notes/policy-fixes-24857037ac054999.yaml | 5 -
 .../policy-in-code-05970b66eb27481a.yaml | 17 -
 .../policy-performance-4d2fa57ccc45bbf1.yaml | 4 -
 .../notes/policy-retry-251cf15f06368ad4.yaml | 5 -
 .../policy-validate-04cbc74d2c025fcc.yaml | 4 -
 .../policy-validation-477a103aa83835f9.yaml | 4 -
 .../profile-only-update-5cdb3ae46a8139a8.yaml | 7 -
 .../profile-type-ops-1f0f2e6e6b5b1999.yaml | 4 -
 .../profile-validate-45a9bc520880bc6b.yaml | 4 -
 .../receiver-create-71ae7367427bf81c.yaml | 4 -
 ...eceiver-create-check-2225f536f5150065.yaml | 5 -
 ...eceiver-create-trust-bd5fdeb059e68330.yaml | 3 -
 ...eiver-filter-by-user-ab35a2ab8e2690d1.yaml | 4 -
 .../receiver-update-f97dc556ce3bf22e.yaml | 4 -
 .../receiver-webhook-d972369731a6ed72.yaml | 5 -
 .../receiver-webhook-v2-a7a24ae6720b5151.yaml | 12 -
 .../notes/remove-bdm-v1-4533677f3bca3c5d.yaml | 4 -
 .../remove-py35-test-bc81b608d6afeb4a.yaml | 5 -
 .../requirement-update-941ebb5825ee9f29.yaml | 6 -
 ...ment-update-victoria-3b150cddd189db7d.yaml | 8 -
 .../notes/resize-params-ab4942dc11f05d9a.yaml | 5 -
 ...ng-policy-validation-e2a1d3049e03c316.yaml | 4 -
 .../schedule-improved-6996965f07450b35.yaml | 6 -
 ...cheduler-enhancement-09f86efe4dde4051.yaml | 3 -
 ...ler-thread-pool-size-40905866197ef8bd.yaml | 6 -
 .../secure-password-e60243ae2befbbf6.yaml | 4 -
 .../senlin-osprofiler-fc8cb7161bdb1a6e.yaml | 4 -
 ...rade-check-framework-b9db3bb9db8d1015.yaml | 13 -
 .../server-image-id-27c1619fa818c6a0.yaml | 4 -
 .../service-cleanup-afacddfacd7b4dcd.yaml | 4 -
 .../notes/service-list-5f4037ae52514f2a.yaml | 4 -
 ...ervice-status-report-625bc25b89907e07.yaml | 4 -
 .../service-update-2e96dd86295ddfa0.yaml | 4 -
 .../notes/setup-script-648e9bfb89bb6255.yaml | 5 -
 ...lifecycle-completion-b528464e11071666.yaml | 6 -
 ...split-engine-service-acea7821cadf9d00.yaml | 33 -
 .../support-status-f7383a53ddcae908.yaml | 4 -
 .../support-subnet-c2492ce8a377b1af.yaml | 4 -
 .../support-volume-type-07d608097c711460.yaml | 3 -
 ...o-alembic-migrations-f442d0b58c3f13a6.yaml | 4 -
 ...est-api-test-support-c86091a7ba5fb789.yaml | 4 -
 ...pest-functional-test-383dad4d9acff97e.yaml | 3 -
 .../notes/template-url-19075b68d9a35a80.yaml | 3 -
 .../test-python3-train-253c0e054dd9d1e3.yaml | 4 -
 ...est-python3-victoria-ec16705d40a167c0.yaml | 4 -
 .../timestamp-datatype-86c0e47debffa919.yaml | 4 -
 .../notes/tools-setup-d73e3298328c5355.yaml | 4 -
 .../trigger-version-af674cfe0f4693cd.yaml | 4 -
 .../notes/unicode-az-ee5ea4346b36eefb.yaml | 3 -
 ...unicode-cluster-name-3bd5b6eeac2566f1.yaml | 3 -
 ...rsioned-rpc-requests-2df5d878c279e933.yaml | 5 -
 .../vm-lock-unlock-da4c3095575c9c94.yaml | 4 -
 .../notes/vm-migrate-6c6adee51ee8ed24.yaml | 4 -
 .../vm-pause-unpause-3e414ce4d86c7ed3.yaml | 4 -
 .../vm-rescue-unrescue-f56047419c50e957.yaml | 4 -
 .../notes/vm-start-stop-e590e25a04fff1e0.yaml | 4 -
 .../vm-suspend-resume-a4398520255e6bbd.yaml | 4 -
 .../notes/webhook-fix-792322c0b7f374aa.yaml | 4 -
 .../notes/zaqar-support-470e824b7737e939.yaml | 4 -
 releasenotes/source/2023.1.rst | 6 -
 releasenotes/source/2023.2.rst | 6 -
 releasenotes/source/_templates/.placeholder | 0
 releasenotes/source/conf.py | 259 --
 releasenotes/source/index.rst | 37 -
 .../locale/en_GB/LC_MESSAGES/releasenotes.po | 166 -
 .../locale/fr/LC_MESSAGES/releasenotes.po | 63 -
 .../locale/zh_CN/LC_MESSAGES/releasenotes.po | 212 --
 releasenotes/source/mitaka.rst | 6 -
 releasenotes/source/newton.rst | 6 -
 releasenotes/source/ocata.rst | 6 -
 releasenotes/source/pike.rst | 6 -
 releasenotes/source/queens.rst | 6 -
 releasenotes/source/rocky.rst | 6 -
 releasenotes/source/stein.rst | 6 -
 releasenotes/source/train.rst | 6 -
 releasenotes/source/unreleased.rst | 5 -
 releasenotes/source/ussuri.rst | 6 -
 releasenotes/source/victoria.rst | 6 -
 releasenotes/source/wallaby.rst | 6 -
 releasenotes/source/xena.rst | 6 -
 releasenotes/source/yoga.rst | 6 -
 releasenotes/source/zed.rst | 6 -
 requirements.txt | 37 -
 senlin/__init__.py | 0
 senlin/api/__init__.py | 0
 senlin/api/common/__init__.py | 0
 senlin/api/common/serializers.py | 91 -
 senlin/api/common/util.py | 118 -
 senlin/api/common/version_request.py | 100 -
 senlin/api/common/versioned_method.py | 34 -
 senlin/api/common/wsgi.py | 943 ------
 senlin/api/middleware/__init__.py | 37 -
 senlin/api/middleware/context.py | 85 -
 senlin/api/middleware/fault.py | 119 -
 senlin/api/middleware/trust.py | 75 -
 senlin/api/middleware/version_negotiation.py | 148 -
 senlin/api/middleware/webhook.py | 114 -
 senlin/api/openstack/__init__.py | 0
 senlin/api/openstack/history.rst | 132 -
 senlin/api/openstack/v1/__init__.py | 0
 senlin/api/openstack/v1/actions.py | 137 -
 senlin/api/openstack/v1/build_info.py | 42 -
 senlin/api/openstack/v1/cluster_policies.py | 63 -
 senlin/api/openstack/v1/clusters.py | 332 --
 senlin/api/openstack/v1/events.py | 69 -
 senlin/api/openstack/v1/nodes.py | 243 --
 senlin/api/openstack/v1/policies.py | 104 -
 senlin/api/openstack/v1/policy_types.py | 54 -
 senlin/api/openstack/v1/profile_types.py | 62 -
 senlin/api/openstack/v1/profiles.py | 104 -
 senlin/api/openstack/v1/receivers.py | 105 -
 senlin/api/openstack/v1/router.py | 336 --
 senlin/api/openstack/v1/services.py | 53 -
 senlin/api/openstack/v1/version.py | 88 -
 senlin/api/openstack/v1/webhooks.py | 61 -
 senlin/api/openstack/versions.py | 59 -
 senlin/cmd/__init__.py | 23 -
 senlin/cmd/api.py | 53 -
 senlin/cmd/api_wsgi.py | 38 -
 senlin/cmd/conductor.py | 52 -
 senlin/cmd/engine.py | 51 -
 senlin/cmd/health_manager.py | 51 -
 senlin/cmd/manage.py | 232 --
 senlin/cmd/status.py | 91 -
 senlin/common/__init__.py | 0
 senlin/common/config.py | 69 -
 senlin/common/constraints.py | 90 -
 senlin/common/consts.py | 362 ---
 senlin/common/context.py | 125 -
 senlin/common/exception.py | 307 --
 senlin/common/i18n.py | 25 -
 senlin/common/messaging.py | 136 -
 senlin/common/policies/__init__.py | 52 -
 senlin/common/policies/actions.py | 58 -
 senlin/common/policies/base.py | 36 -
 senlin/common/policies/build_info.py | 36 -
 senlin/common/policies/cluster_policies.py | 80 -
 senlin/common/policies/clusters.py | 113 -
 senlin/common/policies/events.py | 47 -
 senlin/common/policies/nodes.py | 124 -
 senlin/common/policies/policies.py | 91 -
 senlin/common/policies/policy_types.py | 47 -
 senlin/common/policies/profile_types.py | 58 -
 senlin/common/policies/profiles.py | 91 -
 senlin/common/policies/receivers.py | 91 -
 senlin/common/policies/services.py | 36 -
 senlin/common/policies/webhooks.py | 36 -
 senlin/common/policy.py | 58 -
 senlin/common/profiler.py | 45 -
 senlin/common/scaleutils.py | 328 --
 senlin/common/schema.py | 536 ----
 senlin/common/service.py | 121 -
 senlin/common/utils.py | 242 --
 senlin/conductor/__init__.py | 0
 senlin/conductor/service.py | 2605 ----------------
 senlin/conf/__init__.py | 39 -
 senlin/conf/api.py | 70 -
 senlin/conf/authentication.py | 47 -
 senlin/conf/base.py | 125 -
 senlin/conf/conductor.py | 36 -
 senlin/conf/dispatchers.py | 34 -
 senlin/conf/engine.py | 40 -
 senlin/conf/health_manager.py | 50 -
 senlin/conf/notification.py | 39 -
 senlin/conf/opts.py | 92 -
 senlin/conf/receiver.py | 44 -
 senlin/conf/revision.py | 33 -
 senlin/conf/zaqar.py | 33 -
 senlin/db/__init__.py | 0
 senlin/db/api.py | 544 ----
 senlin/db/sqlalchemy/__init__.py | 0
 senlin/db/sqlalchemy/alembic.ini | 105 -
 senlin/db/sqlalchemy/alembic/README | 4 -
 senlin/db/sqlalchemy/alembic/__init__.py | 0
 senlin/db/sqlalchemy/alembic/env.py | 99 -
 senlin/db/sqlalchemy/alembic/legacy_utils.py | 45 -
 senlin/db/sqlalchemy/alembic/script.py.mako | 32 -
 .../versions/004f8202c264_action_clusterid.py | 37 -
 .../0c04e812f224_user_project_length.py | 70 -
 .../versions/3a04debb8cb1_cluster_config.py | 38 -
 .../versions/569eb0b8_first_version.py | 246 --
 .../versions/5b7cb185e0a5_registry_enable.py | 37 -
 .../662f8e74ac6f_event_column_name.py | 47 -
 .../versions/6f73af60_service_table.py | 51 -
 ...563afc4d_node_cluster_dependents_column.py | 39 -
 .../versions/aaa7e7755feb_node_tainted.py | 37 -
 .../versions/ab7b23c67360_health_registry.py | 52 -
 ...fe13cf8e5_action_starttime_endtime_type.py | 43 -
 .../versions/c3e2bfa76dea_action_tenant.py | 39 -
 senlin/db/sqlalchemy/api.py | 1887 ------------
 senlin/db/sqlalchemy/migration.py | 50 -
 senlin/db/sqlalchemy/models.py | 296 --
 senlin/db/sqlalchemy/types.py | 132 -
 senlin/db/sqlalchemy/utils.py | 127 -
 senlin/drivers/__init__.py | 0
 senlin/drivers/base.py | 48 -
 senlin/drivers/container/__init__.py | 0
 senlin/drivers/container/docker_v1.py | 58 -
 senlin/drivers/os/__init__.py | 34 -
 senlin/drivers/os/cinder_v2.py | 64 -
 senlin/drivers/os/glance_v2.py | 35 -
 senlin/drivers/os/heat_v1.py | 91 -
 senlin/drivers/os/keystone_v3.py | 164 -
 senlin/drivers/os/lbaas.py | 374 ---
 senlin/drivers/os/mistral_v2.py | 72 -
 senlin/drivers/os/neutron_v2.py | 190 --
 senlin/drivers/os/nova_v2.py | 332 --
 senlin/drivers/os/octavia_v2.py | 193 --
 senlin/drivers/os/zaqar_v2.py | 70 -
 senlin/drivers/sdk.py | 194 --
 senlin/engine/__init__.py | 0
 senlin/engine/actions/__init__.py | 0
 senlin/engine/actions/base.py | 692 -----
 senlin/engine/actions/cluster_action.py | 1256 --------
 senlin/engine/actions/custom_action.py | 25 -
 senlin/engine/actions/node_action.py | 297 --
 senlin/engine/cluster.py | 575 ----
 senlin/engine/cluster_policy.py | 105 -
 senlin/engine/dispatcher.py | 59 -
 senlin/engine/environment.py | 228 --
 senlin/engine/event.py | 103 -
 senlin/engine/health_manager.py | 863 ------
 senlin/engine/node.py | 476 ---
 senlin/engine/notifications/__init__.py | 0
 senlin/engine/notifications/base.py | 40 -
 senlin/engine/notifications/heat_endpoint.py | 80 -
 senlin/engine/notifications/message.py | 108 -
 senlin/engine/notifications/nova_endpoint.py | 88 -
 senlin/engine/parser.py | 79 -
 senlin/engine/receivers/__init__.py | 0
 senlin/engine/receivers/base.py | 248 --
 senlin/engine/receivers/message.py | 290 --
 senlin/engine/receivers/webhook.py | 65 -
 senlin/engine/registry.py | 143 -
 senlin/engine/senlin_lock.py | 158 -
 senlin/engine/service.py | 187 --
 senlin/events/__init__.py | 0
 senlin/events/base.py | 49 -
 senlin/events/database.py | 63 -
 senlin/events/message.py | 69 -
 senlin/hacking/__init__.py | 0
 senlin/hacking/checks.py | 93 -
 senlin/health_manager/__init__.py | 0
 senlin/health_manager/service.py | 140 -
 senlin/locale/de/LC_MESSAGES/senlin.po | 1897 ------------
 senlin/objects/__init__.py | 47 -
 senlin/objects/action.py | 216 --
 senlin/objects/base.py | 160 -
 senlin/objects/cluster.py | 163 -
 senlin/objects/cluster_lock.py | 44 -
 senlin/objects/cluster_policy.py | 101 -
 senlin/objects/credential.py | 53 -
 senlin/objects/dependency.py | 40 -
 senlin/objects/event.py | 87 -
 senlin/objects/fields.py | 534 ----
 senlin/objects/health_registry.py | 76 -
 senlin/objects/node.py | 189 --
 senlin/objects/node_lock.py | 43 -
 senlin/objects/notification.py | 283 --
 senlin/objects/policy.py | 117 -
 senlin/objects/profile.py | 117 -
 senlin/objects/receiver.py | 121 -
 senlin/objects/requests/__init__.py | 0
 senlin/objects/requests/actions.py | 96 -
 senlin/objects/requests/build_info.py | 19 -
 senlin/objects/requests/cluster_policies.py | 37 -
 senlin/objects/requests/clusters.py | 297 --
 senlin/objects/requests/credentials.py | 39 -
 senlin/objects/requests/events.py | 46 -
 senlin/objects/requests/nodes.py | 165 -
 senlin/objects/requests/policies.py | 95 -
 senlin/objects/requests/policy_type.py | 28 -
 senlin/objects/requests/profile_type.py | 36 -
 senlin/objects/requests/profiles.py | 97 -
 senlin/objects/requests/receivers.py | 108 -
 senlin/objects/requests/webhooks.py | 40 -
 senlin/objects/service.py | 66 -
 senlin/policies/__init__.py | 0
 senlin/policies/affinity_policy.py | 305 --
 senlin/policies/base.py | 373 ---
 senlin/policies/batch_policy.py | 171 --
 senlin/policies/deletion_policy.py | 273 --
 senlin/policies/health_policy.py | 517 ----
 senlin/policies/lb_policy.py | 746 -----
 senlin/policies/region_placement.py | 283 --
 senlin/policies/scaling_policy.py | 283 --
 senlin/policies/zone_placement.py | 267 --
 senlin/profiles/__init__.py | 0
 senlin/profiles/base.py | 596 ----
 senlin/profiles/container/__init__.py | 0
 senlin/profiles/container/docker.py | 451 ---
 senlin/profiles/os/__init__.py | 0
 senlin/profiles/os/heat/__init__.py | 0
 senlin/profiles/os/heat/stack.py | 412 ---
 senlin/profiles/os/nova/__init__.py | 0
 senlin/profiles/os/nova/server.py | 2182 -------------
 senlin/rpc/__init__.py | 0
 senlin/rpc/client.py | 73 -
 senlin/tests/__init__.py | 0
 senlin/tests/drivers/__init__.py | 0
 senlin/tests/drivers/os_test/README.rst | 11 -
 senlin/tests/drivers/os_test/__init__.py | 35 -
 senlin/tests/drivers/os_test/cinder_v2.py | 100 -
 senlin/tests/drivers/os_test/glance_v2.py | 40 -
 senlin/tests/drivers/os_test/heat_v1.py | 85 -
 senlin/tests/drivers/os_test/keystone_v3.py | 156 -
 senlin/tests/drivers/os_test/lbaas.py | 39 -
 senlin/tests/drivers/os_test/mistral_v2.py | 42 -
 senlin/tests/drivers/os_test/neutron_v2.py | 64 -
 senlin/tests/drivers/os_test/nova_v2.py | 280 --
 senlin/tests/drivers/os_test/octavia_v2.py | 224 --
 senlin/tests/drivers/os_test/zaqar_v2.py | 74 -
 senlin/tests/unit/__init__.py | 26 -
 senlin/tests/unit/api/__init__.py | 0
 senlin/tests/unit/api/common/__init__.py | 0
 .../tests/unit/api/common/test_serializers.py | 201 --
 senlin/tests/unit/api/common/test_util.py | 230 --
 .../unit/api/common/test_version_request.py | 114 -
 senlin/tests/unit/api/common/test_wsgi.py | 466 ---
 senlin/tests/unit/api/middleware/__init__.py | 0
 .../api/middleware/policy/check_admin.json | 3 -
 .../api/middleware/policy/notallowed.json | 10 -
 .../tests/unit/api/middleware/test_context.py | 126 -
 .../tests/unit/api/middleware/test_fault.py | 246 --
 .../api/middleware/test_middleware_filters.py | 81 -
 .../tests/unit/api/middleware/test_trust.py | 212 --
 .../middleware/test_version_negotiation.py | 284 --
 .../tests/unit/api/middleware/test_webhook.py | 268 --
 senlin/tests/unit/api/openstack/__init__.py | 0
 .../tests/unit/api/openstack/test_versions.py | 60 -
 .../tests/unit/api/openstack/v1/__init__.py | 0
 .../unit/api/openstack/v1/test_actions.py | 486 ---
 .../unit/api/openstack/v1/test_buildinfo.py | 77 -
 .../api/openstack/v1/test_cluster_policies.py | 257 --
 .../unit/api/openstack/v1/test_clusters.py | 1531 ----------
 .../unit/api/openstack/v1/test_events.py | 306 --
 .../tests/unit/api/openstack/v1/test_nodes.py | 1151 -------
 .../unit/api/openstack/v1/test_policies.py | 686 -----
 .../api/openstack/v1/test_policy_types.py | 256 --
 .../api/openstack/v1/test_profile_types.py | 350 ---
 .../unit/api/openstack/v1/test_profiles.py | 809 -----
 .../unit/api/openstack/v1/test_receivers.py | 744 -----
 .../unit/api/openstack/v1/test_router.py | 454 ---
 .../unit/api/openstack/v1/test_services.py | 70 -
 .../unit/api/openstack/v1/test_version.py | 108 -
 .../unit/api/openstack/v1/test_webhooks.py | 173 --
 senlin/tests/unit/api/shared.py | 135 -
 senlin/tests/unit/cmd/__init__.py | 0
 senlin/tests/unit/cmd/test_conductor.py | 55 -
 senlin/tests/unit/cmd/test_engine.py | 55 -
 senlin/tests/unit/cmd/test_health_manager.py | 55 -
 senlin/tests/unit/cmd/test_status.py | 75 -
 senlin/tests/unit/common/__init__.py | 0
 senlin/tests/unit/common/base.py | 191 --
 senlin/tests/unit/common/utils.py | 117 -
 senlin/tests/unit/conductor/__init__.py | 0
 .../tests/unit/conductor/service/__init__.py | 0
 .../unit/conductor/service/test_actions.py | 240 --
 .../unit/conductor/service/test_cluster_op.py | 289 --
 .../service/test_cluster_policies.py | 412 ---
 .../unit/conductor/service/test_clusters.py | 2188 -------------
 .../conductor/service/test_credentials.py | 101 -
 .../unit/conductor/service/test_events.py | 173 --
 .../unit/conductor/service/test_nodes.py | 1206 --------
 .../unit/conductor/service/test_policies.py | 375 ---
 .../conductor/service/test_policy_types.py | 85 -
 .../conductor/service/test_profile_types.py | 120 -
 .../unit/conductor/service/test_profiles.py | 375 ---
 .../unit/conductor/service/test_receivers.py | 431 ---
 .../unit/conductor/service/test_webhooks.py | 233 --
 senlin/tests/unit/conductor/test_service.py | 186 --
 senlin/tests/unit/db/__init__.py | 0
 senlin/tests/unit/db/shared.py | 193 --
 senlin/tests/unit/db/test_action_api.py | 807 -----
 senlin/tests/unit/db/test_cluster_api.py | 508 ---
 .../tests/unit/db/test_cluster_policy_api.py | 336 --
 senlin/tests/unit/db/test_cred_api.py | 105 -
 senlin/tests/unit/db/test_event_api.py | 552 ----
 senlin/tests/unit/db/test_lock_api.py | 555 ----
 senlin/tests/unit/db/test_migration.py | 126 -
 senlin/tests/unit/db/test_node_api.py | 681 -----
 senlin/tests/unit/db/test_policy_api.py | 413 ---
 senlin/tests/unit/db/test_profile_api.py | 370 ---
 senlin/tests/unit/db/test_receiver_api.py | 345 ---
 senlin/tests/unit/db/test_registry_api.py | 170 --
 senlin/tests/unit/db/test_service_api.py | 114 -
 senlin/tests/unit/db/test_sqlalchemy_types.py | 144 -
 senlin/tests/unit/db/test_sqlalchemy_utils.py | 119 -
 senlin/tests/unit/drivers/__init__.py | 0
 senlin/tests/unit/drivers/test_cinder_v2.py | 107 -
 senlin/tests/unit/drivers/test_docker_v1.py | 115 -
 senlin/tests/unit/drivers/test_driver.py | 47 -
 senlin/tests/unit/drivers/test_glance_v2.py | 83 -
 senlin/tests/unit/drivers/test_heat_v1.py | 137 -
 senlin/tests/unit/drivers/test_keystone_v3.py | 294 --
 senlin/tests/unit/drivers/test_lbaas.py | 871 ------
 senlin/tests/unit/drivers/test_mistral_v2.py | 117 -
 senlin/tests/unit/drivers/test_neutron_v2.py | 158 -
 senlin/tests/unit/drivers/test_nova_v2.py | 645 ----
 senlin/tests/unit/drivers/test_octavia_v2.py | 342 ---
 senlin/tests/unit/drivers/test_sdk.py | 293 --
 senlin/tests/unit/drivers/test_zaqar_v2.py | 146 -
 senlin/tests/unit/engine/__init__.py | 0
 senlin/tests/unit/engine/actions/__init__.py | 0
 .../unit/engine/actions/test_action_base.py | 1202 --------
 .../unit/engine/actions/test_add_nodes.py | 296 --
 .../unit/engine/actions/test_attach_policy.py | 109 -
 .../tests/unit/engine/actions/test_check.py | 202 --
 .../engine/actions/test_cluster_action.py | 276 --
 .../tests/unit/engine/actions/test_create.py | 288 --
 .../unit/engine/actions/test_custom_action.py | 36 -
 .../unit/engine/actions/test_del_nodes.py | 221 --
 .../tests/unit/engine/actions/test_delete.py | 1096 -------
 .../unit/engine/actions/test_node_action.py | 944 ------
 .../unit/engine/actions/test_operation.py | 139 -
 .../tests/unit/engine/actions/test_recover.py | 361 ---
 .../unit/engine/actions/test_replace_nodes.py | 296 --
 .../tests/unit/engine/actions/test_resize.py | 263 --
 .../unit/engine/actions/test_scale_in.py | 207 --
 .../unit/engine/actions/test_scale_out.py | 205 --
 .../tests/unit/engine/actions/test_update.py | 333 --
 .../unit/engine/actions/test_update_policy.py | 79 -
 senlin/tests/unit/engine/actions/test_wait.py | 90 -
 .../unit/engine/notifications/__init__.py | 0
 .../notifications/test_heat_endpoint.py | 230 --
 .../unit/engine/notifications/test_message.py | 149 -
 .../notifications/test_nova_endpoint.py | 213 --
 .../tests/unit/engine/receivers/__init__.py | 0
 .../unit/engine/receivers/test_message.py | 710 -----
 .../unit/engine/receivers/test_receiver.py | 411 ---
 .../unit/engine/receivers/test_webhook.py | 91 -
 senlin/tests/unit/engine/test_cluster.py | 1072 -------
 .../tests/unit/engine/test_cluster_policy.py | 142 -
 .../tests/unit/engine/test_engine_parser.py | 133 -
 senlin/tests/unit/engine/test_environment.py | 346 ---
 senlin/tests/unit/engine/test_event.py | 222 --
 .../tests/unit/engine/test_health_manager.py | 1619 ----------
 senlin/tests/unit/engine/test_node.py | 1217 --------
 senlin/tests/unit/engine/test_registry.py | 221 --
 senlin/tests/unit/engine/test_senlin_lock.py | 190 --
 senlin/tests/unit/engine/test_service.py | 396 ---
 senlin/tests/unit/events/__init__.py | 0
 senlin/tests/unit/events/test_base.py | 73 -
 senlin/tests/unit/events/test_database.py | 122 -
 senlin/tests/unit/events/test_message.py | 267 --
 senlin/tests/unit/fakes.py | 89 -
 senlin/tests/unit/health_manager/__init__.py | 0
 .../tests/unit/health_manager/test_service.py | 195 --
 senlin/tests/unit/objects/__init__.py | 0
 .../tests/unit/objects/requests/__init__.py | 0
 .../unit/objects/requests/test_actions.py | 155 -
 .../objects/requests/test_cluster_policies.py | 98 -
 .../unit/objects/requests/test_clusters.py | 587 ----
 .../unit/objects/requests/test_credentials.py | 79 -
 .../unit/objects/requests/test_events.py | 59 -
 .../tests/unit/objects/requests/test_nodes.py | 250 --
 .../unit/objects/requests/test_policies.py | 249 --
 .../unit/objects/requests/test_policy_type.py | 45 -
 .../objects/requests/test_profile_type.py | 62 -
 .../unit/objects/requests/test_profiles.py | 264 --
 .../unit/objects/requests/test_receivers.py | 164 -
 .../unit/objects/requests/test_webhooks.py | 91 -
 senlin/tests/unit/objects/test_action.py | 90 -
 senlin/tests/unit/objects/test_base.py | 160 -
 senlin/tests/unit/objects/test_cluster.py | 167 -
 senlin/tests/unit/objects/test_event.py | 73 -
 senlin/tests/unit/objects/test_fields.py | 767 -----
 .../unit/objects/test_health_registry.py | 121 -
 senlin/tests/unit/objects/test_node.py | 137 -
 .../tests/unit/objects/test_notification.py | 608 ----
 senlin/tests/unit/objects/test_policy.py | 93 -
 senlin/tests/unit/objects/test_profile.py | 93 -
 senlin/tests/unit/objects/test_receiver.py | 92 -
 senlin/tests/unit/policies/__init__.py | 0
 senlin/tests/unit/policies/test_affinity.py | 853 ------
 .../tests/unit/policies/test_batch_policy.py | 183 --
 .../unit/policies/test_deletion_policy.py | 548 ----
 .../tests/unit/policies/test_health_policy.py | 529 ----
 senlin/tests/unit/policies/test_lb_policy.py | 1583 ----------
 senlin/tests/unit/policies/test_policy.py | 601 ----
 .../unit/policies/test_region_placement.py | 426 ---
 .../unit/policies/test_scaling_policy.py | 450 ---
 .../unit/policies/test_zone_placement.py | 418 ---
 senlin/tests/unit/profiles/__init__.py | 0
 .../unit/profiles/test_container_docker.py | 817 -----
 senlin/tests/unit/profiles/test_heat_stack.py | 1008 ------
 .../tests/unit/profiles/test_nova_server.py | 2713 -----------------
 .../unit/profiles/test_nova_server_update.py | 1699 -----------
 .../profiles/test_nova_server_validate.py | 966 ------
 .../tests/unit/profiles/test_profile_base.py | 1007 ------
 senlin/tests/unit/test_common_constraints.py | 239 --
 senlin/tests/unit/test_common_context.py | 74 -
 senlin/tests/unit/test_common_exception.py | 36 -
 senlin/tests/unit/test_common_messaging.py | 136 -
 senlin/tests/unit/test_common_policy.py | 42 -
 senlin/tests/unit/test_common_scaleutils.py | 426 ---
 senlin/tests/unit/test_common_schema.py | 956 ------
 senlin/tests/unit/test_common_utils.py | 299 --
 senlin/tests/unit/test_conf.py | 54 -
 senlin/tests/unit/test_hacking.py | 127 -
 senlin/tests/unit/test_rpc_client.py | 129 -
 senlin/version.py | 17 -
 setup.cfg | 81 -
 setup.py | 21 -
 test-requirements.txt | 16 -
 tools/README.rst | 60 -
 tools/config-generator.conf | 15 -
 tools/gen-config | 3 -
 tools/gen-policy | 3 -
 tools/gen-pot-files | 3 -
 tools/policy-generator.conf | 3 -
 tools/senlin-db-recreate | 15 -
 tools/setup-service | 82 -
 tox.ini | 120 -
 uninstall.sh | 21 -
 955 files changed, 8 insertions(+), 128411 deletions(-)
 delete mode 100644 .coveragerc
 delete mode 100644 .gitignore
 delete mode 100644 .stestr.conf
 delete mode 100644 .zuul.yaml
 delete mode 100644 CONTRIBUTING.rst
 delete mode 100644 FEATURES.rst
 delete mode 100644 HACKING.rst
 delete mode 100644 LICENSE
 delete mode 100644 TODO.rst
 delete mode 100644 api-ref/source/actions.inc
 delete mode 100644 api-ref/source/build_info.inc
 delete mode 100644 api-ref/source/cluster_policies.inc
 delete mode 100644 api-ref/source/clusters.inc
 delete mode 100644 api-ref/source/conf.py
 delete mode 100644 api-ref/source/events.inc
 delete mode 100644 api-ref/source/index.rst
 delete mode 100644 api-ref/source/nodes.inc
 delete mode 100644 api-ref/source/parameters.yaml
 delete mode 100644 api-ref/source/policies.inc
 delete mode 100644 api-ref/source/policy_types.inc
 delete mode 100644 api-ref/source/profile_types.inc
 delete mode 100644 api-ref/source/profiles.inc
 delete mode 100644 api-ref/source/receivers.inc
 delete mode 100644 api-ref/source/samples/action-get-request.json
 delete mode 100644 api-ref/source/samples/action-get-response.json
 delete mode 100644 api-ref/source/samples/actions-list-response.json
 delete mode 100644 api-ref/source/samples/build-show-response.json
 delete mode 100644 api-ref/source/samples/cluster-action-response.json
 delete mode 100644 api-ref/source/samples/cluster-add-nodes-request.json
 delete mode 100644 api-ref/source/samples/cluster-attach-policy-request.json
 delete mode 100644 api-ref/source/samples/cluster-attrs-list-response.json
 delete mode 100644 api-ref/source/samples/cluster-check-request.json
 delete mode 100644 api-ref/source/samples/cluster-complete-lifecycle-request.json
 delete mode 100644 api-ref/source/samples/cluster-create-request.json
 delete mode 100644 api-ref/source/samples/cluster-create-response.json
 delete mode 100644 api-ref/source/samples/cluster-del-nodes-request.json
 delete mode 100644 api-ref/source/samples/cluster-detach-policy-request.json
 delete mode 100644 api-ref/source/samples/cluster-list-response.json
 delete mode 100644 api-ref/source/samples/cluster-operation-request.json
 delete mode 100644 api-ref/source/samples/cluster-policies-list-response.json
 delete mode 100644 api-ref/source/samples/cluster-policy-show-response.json
 delete mode 100644 api-ref/source/samples/cluster-recover-request.json
 delete mode 100644 api-ref/source/samples/cluster-replace-nodes-request.json
 delete mode 100644 api-ref/source/samples/cluster-resize-request.json
 delete mode 100644 api-ref/source/samples/cluster-scale-in-request.json
 delete mode 100644 api-ref/source/samples/cluster-scale-out-request.json
 delete mode 100644 api-ref/source/samples/cluster-show-response.json
 delete mode 100644 api-ref/source/samples/cluster-update-policy-request.json
 delete mode 100644 api-ref/source/samples/cluster-update-request.json
 delete mode 100644 api-ref/source/samples/cluster-update-response.json
 delete mode 100644 api-ref/source/samples/clusters-list-response.json
 delete mode 100644 api-ref/source/samples/event-show-response.json
 delete mode 100644 api-ref/source/samples/events-list-response.json
 delete mode 100644 api-ref/source/samples/node-action-response.json
 delete mode 100644 api-ref/source/samples/node-adopt-preview-request.json
 delete mode 100644 api-ref/source/samples/node-adopt-preview-response.json
 delete mode 100644 api-ref/source/samples/node-adopt-request.json
 delete mode 100644 api-ref/source/samples/node-adopt-response.json
 delete mode 100644 api-ref/source/samples/node-check-request.json
 delete mode 100644 api-ref/source/samples/node-create-request.json
 delete mode 100644 api-ref/source/samples/node-create-response.json
 delete mode 100644 api-ref/source/samples/node-list-response.json
 delete mode 100644 api-ref/source/samples/node-operation-request.json
 delete mode 100644 api-ref/source/samples/node-recover-request.json
 delete mode 100644 api-ref/source/samples/node-show-response.json
 delete mode 100644 api-ref/source/samples/node-update-request.json
 delete mode 100644 api-ref/source/samples/policy-create-request.json
 delete mode 100644 api-ref/source/samples/policy-create-response.json
 delete mode 100644 api-ref/source/samples/policy-list-response.json
 delete mode 100644 api-ref/source/samples/policy-show-response.json
 delete mode 100644 api-ref/source/samples/policy-type-show-response-v1.5.json
 delete mode 100644 api-ref/source/samples/policy-type-show-response.json
 delete mode 100644 api-ref/source/samples/policy-types-list-response-v1.5.json
 delete mode 100644 api-ref/source/samples/policy-types-list-response.json
 delete mode 100644 api-ref/source/samples/policy-update-request.json
 delete mode 100644 api-ref/source/samples/policy-update-response.json
 delete mode 100644 api-ref/source/samples/policy-validate-request.json
 delete mode 100644 api-ref/source/samples/policy-validate-response.json
 delete mode 100644 api-ref/source/samples/profile-create-request.json
 delete mode 100644 api-ref/source/samples/profile-create-response.json
 delete mode 100644 api-ref/source/samples/profile-list-response.json
 delete mode 100644 api-ref/source/samples/profile-show-response.json
 delete mode 100644 api-ref/source/samples/profile-type-ops-response.json
 delete mode 100644 api-ref/source/samples/profile-type-show-response-v1.5.json
 delete mode 100644 api-ref/source/samples/profile-type-show-response.json
 delete mode 100644 api-ref/source/samples/profile-types-list-response-v1.5.json
 delete mode 100644 api-ref/source/samples/profile-types-list-response.json
 delete mode 100644 api-ref/source/samples/profile-update-request.json
 delete mode 100644 api-ref/source/samples/profile-update-response.json
 delete mode 100644 api-ref/source/samples/profile-validate-request.json
 delete mode 100644 api-ref/source/samples/profile-validate-response.json
 delete mode 100644 api-ref/source/samples/receiver-create-request.json
 delete mode 100644 api-ref/source/samples/receiver-create-response.json
 delete mode 100644 api-ref/source/samples/receiver-show-response.json
 delete mode 100644 api-ref/source/samples/receiver-update-request.json
 delete mode 100644 api-ref/source/samples/receiver-update-response.json
 delete mode 100644 api-ref/source/samples/receivers-list-response.json
 delete mode 100644 api-ref/source/samples/services-list-response.json
 delete mode 100644 api-ref/source/samples/version-show-response.json
 delete mode 100644 api-ref/source/samples/versions-list-response.json
 delete mode 100644 api-ref/source/samples/webhook-action-response.json
 delete mode 100644 api-ref/source/services.inc
 delete mode 100644 api-ref/source/status.yaml
 delete mode 100644 api-ref/source/versions.inc
 delete mode 100644 api-ref/source/webhooks.inc
 delete mode 100644 bindep.txt
 delete mode 100644 contrib/kubernetes/README.rst
 delete mode 100644 contrib/kubernetes/TODO.rst
 delete mode 100644 contrib/kubernetes/examples/kubemaster.yaml
 delete mode 100644 contrib/kubernetes/examples/kubenode.yaml
 delete mode 100644 contrib/kubernetes/kube/__init__.py
 delete mode 100644 contrib/kubernetes/kube/base.py
 delete mode 100644 contrib/kubernetes/kube/master.py
 delete mode 100644 contrib/kubernetes/kube/scripts/master.sh
 delete mode 100644 contrib/kubernetes/kube/scripts/worker.sh
 delete mode 100644 contrib/kubernetes/kube/worker.py
 delete mode 100644 contrib/kubernetes/requirements.txt
 delete mode 100644 contrib/kubernetes/setup.cfg
 delete mode 100644 contrib/kubernetes/setup.py
 delete mode 100644 contrib/vdu/README.rst
 delete mode 100644 contrib/vdu/examples/vdu.yaml
 delete mode 100644 contrib/vdu/requirements.txt
 delete mode 100644 contrib/vdu/setup.cfg
 delete mode 100644 contrib/vdu/setup.py
 delete mode 100644 contrib/vdu/vdu/__init__.py
 delete mode 100644 contrib/vdu/vdu/server.py
 delete mode 100644 devstack/README.rst
 delete mode 100644 devstack/files/apache-senlin-api.template
 delete mode 100644 devstack/lib/senlin
 delete mode 100644 devstack/plugin.sh
 delete mode 100644 devstack/settings
 delete mode 100644 doc/.gitignore
 delete mode 100644 doc/Makefile
 delete mode 100644 doc/README.rst
 delete mode 100644 doc/requirements.txt
 delete mode 100644 doc/source/admin/authentication.rst
 delete mode 100644 doc/source/admin/index.rst
 delete mode 100644 doc/source/conf.py
 delete mode 100644 doc/source/configuration/config.rst
 delete mode 100644 doc/source/configuration/index.rst
 delete mode 100644 doc/source/configuration/policy.rst
 delete mode 100644 doc/source/configuration/sample-policy-yaml.rst
 delete mode 100644 doc/source/contributor/action.rst
 delete mode 100644 doc/source/contributor/api_microversion.rst
 delete mode 100644 doc/source/contributor/authorization.rst
 delete mode 100644 doc/source/contributor/cluster.rst
 delete mode 100644 doc/source/contributor/event_dispatcher.rst
 delete mode 100644 doc/source/contributor/node.rst
 delete mode 100644 doc/source/contributor/osprofiler.rst
 delete mode 100644 doc/source/contributor/plugin_guide.rst
 delete mode 100644 doc/source/contributor/policies/affinity_v1.rst
 delete mode 100644 doc/source/contributor/policies/deletion_v1.rst
 delete mode 100644 doc/source/contributor/policies/health_v1.rst
 delete mode 100644 doc/source/contributor/policies/load_balance_v1.rst
 delete mode 100644 doc/source/contributor/policies/region_v1.rst
 delete mode 100644 doc/source/contributor/policies/scaling_v1.rst
 delete mode 100644 doc/source/contributor/policies/zone_v1.rst
 delete mode 100644 doc/source/contributor/policy.rst
 delete mode 100644 doc/source/contributor/policy_type.rst
 delete mode 100644 doc/source/contributor/profile.rst
 delete mode 100644 doc/source/contributor/profile_type.rst
 delete mode 100644 doc/source/contributor/receiver.rst
 delete mode 100644 doc/source/contributor/reviews.rst
 delete mode 100644 doc/source/contributor/testing.rst
 delete mode 100644 doc/source/ext/__init__.py
 delete mode 100644 doc/source/ext/resources.py
 delete mode 100644 doc/source/index.rst
 delete mode 100644 doc/source/install/index.rst
 delete mode 100644 doc/source/install/install-devstack.rst
 delete mode 100644 doc/source/install/install-rdo.rst
 delete mode 100644 doc/source/install/install-source.rst
 delete mode 100644 doc/source/install/verify.rst
 delete mode 100644 doc/source/overview.rst
 delete mode 100644 doc/source/reference/api.rst
 delete mode 100644 doc/source/reference/glossary.rst
 delete mode 100644 doc/source/reference/man/index.rst
 delete mode 100644 doc/source/reference/man/senlin-api.rst
 delete mode 100644 doc/source/reference/man/senlin-conductor.rst
 delete mode 100644 doc/source/reference/man/senlin-engine.rst
 delete mode 100644 doc/source/reference/man/senlin-health-manager.rst
 delete mode 100644 doc/source/reference/man/senlin-manage.rst
 delete mode 100644 doc/source/reference/man/senlin-status.rst
 delete mode 100644 doc/source/scenarios/affinity.rst
 delete mode 100644 doc/source/scenarios/autoscaling_ceilometer.rst
 delete mode 100644 doc/source/scenarios/autoscaling_heat.rst
 delete mode 100644 doc/source/scenarios/autoscaling_overview.rst
 delete mode 100644 doc/source/scenarios/ex_lbas.yaml
 delete mode 100644 doc/source/tutorial/autoscaling.rst
 delete mode 100644 doc/source/tutorial/basics.rst
 delete mode 100644 doc/source/tutorial/policies.rst
 delete mode 100644 doc/source/tutorial/receivers.rst
 delete mode 100644 doc/source/user/actions.rst
 delete mode 100644 doc/source/user/bindings.rst
 delete mode 100644 doc/source/user/clusters.rst
 delete mode 100644 doc/source/user/events.rst
 delete mode 100644 doc/source/user/membership.rst
 delete mode 100644 doc/source/user/nodes.rst
 delete mode 100644 doc/source/user/policies.rst
 delete mode 100644 doc/source/user/policy_types.rst
 delete mode 100644 doc/source/user/policy_types/affinity.rst
 delete mode 100644 doc/source/user/policy_types/batch.rst
 delete mode 100644 doc/source/user/policy_types/deletion.rst
 delete mode 100644 doc/source/user/policy_types/health.rst
 delete mode 100644 doc/source/user/policy_types/load_balancing.rst
 delete mode 100644 doc/source/user/policy_types/region_placement.rst
 delete mode 100644 doc/source/user/policy_types/scaling.rst
 delete mode 100644 doc/source/user/policy_types/zone_placement.rst
 delete mode 100644 doc/source/user/profile_types.rst
 delete mode 100644 doc/source/user/profile_types/docker.rst
 delete mode 100644 doc/source/user/profile_types/nova.rst
 delete mode 100644 doc/source/user/profile_types/stack.rst
 delete mode 100644 doc/source/user/profiles.rst
 delete mode 100644 doc/source/user/receivers.rst
 delete mode 100644 doc/specs/README.rst
 delete mode 100644 doc/specs/approved/README.rst
 delete mode 100644 doc/specs/approved/container-cluster.rst
 delete mode 100644 doc/specs/approved/generic-event.rst
 delete mode 100644 doc/specs/cluster-fast-scaling.rst
 delete mode 100644 doc/specs/fail-fast-on-locked_resource.rst
 delete mode 100644 doc/specs/lifecycle-hook.rst
 delete mode 100644 doc/specs/multiple-detection-modes.rst
 delete mode 100644 doc/specs/rejected/README.rst
 delete mode 100644 doc/specs/template.rst
 delete mode 100644 doc/specs/workflow-recover.rst
 delete mode 100644 etc/senlin/README-senlin.conf.txt
 delete mode 100644 etc/senlin/api-paste.ini
 delete mode 100644 examples/policies/WIP/batching_1_1_0.yaml
 delete mode 100644 examples/policies/WIP/health_policy_lb.yaml
 delete mode 100644 examples/policies/WIP/lb_policy_aws.spec
 delete mode 100644 examples/policies/affinity_policy.yaml
 delete mode 100644 examples/policies/batch_policy.yaml
 delete mode 100644 examples/policies/deletion_policy.yaml
 delete mode 100644 examples/policies/deletion_policy_lifecycle_hook.yaml
 delete mode 100644 examples/policies/health_policy_event.yaml
 delete mode 100644 examples/policies/health_policy_poll.yaml
 delete mode 100644 examples/policies/health_policy_poll_url.yaml
 delete mode 100644 examples/policies/lb_policy.yaml
 delete mode 100644 examples/policies/placement_region.yaml
 delete mode 100644 examples/policies/placement_zone.yaml
 delete mode 100644 examples/policies/scaling_policy.yaml
 delete mode 100644 examples/profiles/README.rst
 delete mode 100644 examples/profiles/docker_container/docker_basic.yaml
 delete mode 100644 examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml
 delete mode 100644 examples/profiles/heat_stack/nova_server/nova_server_template.yaml
 delete mode 100644 examples/profiles/heat_stack/random_string/heat_stack_random_string.yaml
 delete mode 100644 examples/profiles/heat_stack/random_string/random_string_template.yaml
 delete mode 100644 examples/profiles/nova_server/cirros_basic.yaml
 delete mode 100755 install.sh
 delete mode 100644 releasenotes/notes/.placeholder
 delete mode 100644 releasenotes/notes/Switch-to-alembic-migrations-f442d0b58c3f13a6.yaml
 delete mode 100644 releasenotes/notes/Updated-for-SQLAlchemy-2.x-ee6831e5a95d3658.yaml
 delete mode 100644 releasenotes/notes/acess-control-admin-project-762c8e91e8875738.yaml
 delete mode 100644 releasenotes/notes/action-policy-optimization-06ea45eb3dcbe33a.yaml
 delete mode 100644 releasenotes/notes/action-purge-11db5d8018b8389a.yaml
 delete mode 100644 releasenotes/notes/action-update-api-fc51b1582c0b5902.yaml
 delete mode 100644 releasenotes/notes/add-action-filter-40e775a26082f780.yaml
 delete mode 100644 releasenotes/notes/add-availability_zone-option-to-loadbalancer-74b512fb0c138bfe.yaml
 delete mode 100644 releasenotes/notes/affinity-policy-fix-72ae92dc8ffcff00.yaml
 delete mode 100644 releasenotes/notes/api-ref-fixes-19bc963430c32ecf.yaml
 delete mode 100644 releasenotes/notes/az-info-9344b8d54c0b2665.yaml
 delete mode 100644 releasenotes/notes/batch-scheduling-ca5d98d41fc72973.yaml
 delete mode 100644 releasenotes/notes/bdmv2-fix-b9ff742cdc282087.yaml
 delete mode 100644 releasenotes/notes/bug-1789488-75ee756a53722cd1.yaml
 delete mode 100644 releasenotes/notes/bug-1811161-c6416ad27ab0a2ce.yaml
 delete mode 100644 releasenotes/notes/bug-1811294-262d4b9cced3f505.yaml
 delete mode 100644 releasenotes/notes/bug-1813089-db57e7bdfd3983ac.yaml
 delete mode 100644 releasenotes/notes/bug-1815540-2664a975db5fafc8.yaml
 delete mode 100644 releasenotes/notes/bug-1817379-23dd2c925259d5f2.yaml
 delete mode 100644 releasenotes/notes/bug-1817604-41d4b8f6c6f920e4.yaml
 delete mode 100644 releasenotes/notes/bug-1828856-bf7a30a6eb00238a.yaml
 delete mode 100644 releasenotes/notes/bug-2048099-74f0ca874cfbe6b4.yaml
delete mode 100644 releasenotes/notes/bug-2048100-6b4156df956a6f14.yaml delete mode 100644 releasenotes/notes/bug-2048452-8a690353815601a0.yaml delete mode 100644 releasenotes/notes/bug-2048726-a830a7838661a41f.yaml delete mode 100644 releasenotes/notes/bug-2049191-8ee2d8352b05cfef.yaml delete mode 100644 releasenotes/notes/capacity-calculation-4fd389ff12107dfb.yaml delete mode 100644 releasenotes/notes/clean-actions-for-cluster-node-438ca5268e7fd258.yaml delete mode 100644 releasenotes/notes/cluster-action-refresh-9eeb60f1f2c1d0abr.yaml delete mode 100644 releasenotes/notes/cluster-check-interval-b01e8140cc83760e.yaml delete mode 100644 releasenotes/notes/cluster-collect-90e460c7bfede347.yaml delete mode 100644 releasenotes/notes/cluster-delete-conflict-94261706eb29e9bb.yaml delete mode 100644 releasenotes/notes/cluster-delete-with-policy-d2dca161e42ee6ba.yaml delete mode 100644 releasenotes/notes/cluster-desired-capacity-d876347f69b04b4f.yaml delete mode 100644 releasenotes/notes/cluster-lock-e283fb9bf1002bca.yaml delete mode 100644 releasenotes/notes/cluster-node-dependents-3bdbebd773d276d1.yaml delete mode 100644 releasenotes/notes/cluster-node-status-e7fced162b415452.yaml delete mode 100644 releasenotes/notes/cluster-ops-433a5aa608a0eb7f.yaml delete mode 100644 releasenotes/notes/cluster-recover-d87d429873b376db.yaml delete mode 100644 releasenotes/notes/cluster-resize-fix-bee18840a98907d8.yaml delete mode 100644 releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml delete mode 100644 releasenotes/notes/cluster-status-update-dd9133092aef05ab.yaml delete mode 100644 releasenotes/notes/compute-instance-fencing-63b931cdf35b127c.yaml delete mode 100644 releasenotes/notes/config-default-nova-timeout-f0bd73811ac3a8bb.yaml delete mode 100644 releasenotes/notes/config-doc-cb8b37e360422301.yaml delete mode 100644 releasenotes/notes/config-scheduler-thread-pool-size-de608624a6cb4b43r.yaml delete mode 100644 releasenotes/notes/config-stop-node-before-delete-4ab08e61b40e4474.yaml delete mode 100644 releasenotes/notes/config-trust-roles-416e26e03036ae40.yaml delete mode 100644 releasenotes/notes/container-ops-e57d096742202206.yaml delete mode 100644 releasenotes/notes/container-profile-152bf2908c70ffad.yaml delete mode 100644 releasenotes/notes/db-action-retries-d471fe85b4510afd.yaml delete mode 100644 releasenotes/notes/db-ignore-project_safe-for-admins-2986f15e74cd1d1c.yaml delete mode 100644 releasenotes/notes/db-locking-logic-9c97b04ce8c52989.yaml delete mode 100644 releasenotes/notes/db-retries-da4a0d9d83ad56bb.yaml delete mode 100644 releasenotes/notes/delete-batch-a16ee5ed2512eab7.yaml delete mode 100644 releasenotes/notes/delete_with_dependants-823c6c4921f22575.yaml delete mode 100644 releasenotes/notes/deletion-policy-11bcb7c0e90bbfcc.yaml delete mode 100644 releasenotes/notes/deletion-policy-node-delete-dc70da377b2a4f77.yaml delete mode 100644 releasenotes/notes/deprecate-json-formatted-policy-file-0c29555b3ea0c984.yaml delete mode 100644 releasenotes/notes/destroy-nodes-after-remove-37bffdc35a9b7a96.yaml delete mode 100644 releasenotes/notes/doc-fixes-0783e8120b61299br.yaml delete mode 100644 releasenotes/notes/doc-fixes-5057bf93464810cc.yaml delete mode 100644 releasenotes/notes/doc-fixes-685c64d1ef509041.yaml delete mode 100644 releasenotes/notes/doc-fixes-cd8c7006f8c66387.yaml delete mode 100644 releasenotes/notes/doc-fixes-e60bb1a486f67e0c.yaml delete mode 100644 releasenotes/notes/docker-reboot-999ec624186864e3.yaml delete mode 100644 
releasenotes/notes/docker-start-c850c256c6149f4f.yaml delete mode 100644 releasenotes/notes/docker-update-1b465241ca78873c.yaml delete mode 100644 releasenotes/notes/drop-py-2-7-154eeefdc9886091.yaml delete mode 100644 releasenotes/notes/drop-py34-support-21e20efb9bf0b326.yaml delete mode 100644 releasenotes/notes/drop-python-3-6-and-3-7-3a90d172a5e43660.yaml delete mode 100644 releasenotes/notes/dynamic-timer-67f053499f4b32e2.yaml delete mode 100644 releasenotes/notes/enforce-multi-tenancy-ee27b9bfec7ba405.yaml delete mode 100644 releasenotes/notes/error-messages-bd8b5a6d12e2c4af.yaml delete mode 100644 releasenotes/notes/event-for-derived-actions-8bd44367fa683dbc.yaml delete mode 100644 releasenotes/notes/event-list-b268bb778efa9ee1.yaml delete mode 100644 releasenotes/notes/event-notification-eda06b43ce17a081.yaml delete mode 100644 releasenotes/notes/event-purge-db868a063e18eafb.yaml delete mode 100644 releasenotes/notes/event-table-change-dcb42c8b6d145fec.yaml delete mode 100644 releasenotes/notes/fail-fast-on-locked-resource-eee28572dc40009a.yaml delete mode 100644 releasenotes/notes/fix-action-triggering-e880b02234028315.yaml delete mode 100644 releasenotes/notes/fix-aodh-integration-41e69276158ad233.yaml delete mode 100644 releasenotes/notes/fix-cluster-index-ae0060b6337d6d55.yaml delete mode 100644 releasenotes/notes/fix-cooldown-5082711989ecd536.yaml delete mode 100644 releasenotes/notes/fix-db-deadlock-1d2bdb9ce785734a.yaml delete mode 100644 releasenotes/notes/fix-delete-apis-bf9f47b5fcf8f3e6.yaml delete mode 100644 releasenotes/notes/fix-delete-node-error-31575d62bc9375ec.yaml delete mode 100644 releasenotes/notes/fix-desired-when-omitted-e7ffc0aa72ab8cc9.yaml delete mode 100644 releasenotes/notes/fix-dup-of-action-dump-0b95a07adf3ccdba.yaml delete mode 100644 releasenotes/notes/fix-health-check-5d77795885676661.yaml delete mode 100644 releasenotes/notes/fix-health-cluster-check-5ce1c0309c03c5d5.yaml delete mode 100644 releasenotes/notes/fix-health-mgr-opts-99898614f37c5d74.yaml delete mode 100644 releasenotes/notes/fix-health-policy-bind-9b6ed0e51939eac3.yaml delete mode 100644 releasenotes/notes/fix-network-error-handling-e78da90b6bc2319c.yaml delete mode 100644 releasenotes/notes/fix-node-get-detail-4e6d30c3a6b2ce60.yaml delete mode 100644 releasenotes/notes/fix-node-leak-9b1c08342a52542d.yaml delete mode 100644 releasenotes/notes/fix-node-recover-5af129bf0688577d.yaml delete mode 100644 releasenotes/notes/fix-node-status-for-lb-fc7714da09bec2fb.yaml delete mode 100644 releasenotes/notes/fix-openstacksdk-exception-b762e649bfab4b31r.yaml delete mode 100644 releasenotes/notes/fix-policy-type-version-939a1fb4e84908f9.yaml delete mode 100644 releasenotes/notes/fix-port-id-parameter-de4679438a891a67r.yaml delete mode 100644 releasenotes/notes/fix-recover-trigger-749600f500f7bf4a.yaml delete mode 100644 releasenotes/notes/fix-registry-claim-5421dca1ed9b0783.yaml delete mode 100644 releasenotes/notes/fix-security-group-with-same-name-887487416f4525a1.yaml delete mode 100644 releasenotes/notes/fix-tag-for-stacks-2ef70be061e80253.yaml delete mode 100644 releasenotes/notes/fix-tox-cover-9fc01b5e0594aa19r.yaml delete mode 100644 releasenotes/notes/fix-update-lb-policy-0af6e8866f3b5543.yaml delete mode 100644 releasenotes/notes/forbid-cluster-deletion-a8b0f55aaf0aa106.yaml delete mode 100644 releasenotes/notes/force-delete-0b185ea6d70ed81e.yaml delete mode 100644 releasenotes/notes/gc-for-dead-engine-2246c714edc9a2df.yaml delete mode 100644 
releasenotes/notes/health-add-cleanup-2d5143ec2bb78e55.yaml delete mode 100644 releasenotes/notes/health-check-interval-b3850c072600bfdf.yaml delete mode 100644 releasenotes/notes/health-lb-polling-32d83803c77cc1d8.yaml delete mode 100644 releasenotes/notes/health-manager-fixes-d5955f9af88102fc.yaml delete mode 100644 releasenotes/notes/health-manager-listener-8ddbe169e510031b.yaml delete mode 100644 releasenotes/notes/health-policy-actions-936db8bc3ed08aec.yaml delete mode 100644 releasenotes/notes/health-policy-mutiple-detection-types-10bfdc80771278cb.yaml delete mode 100644 releasenotes/notes/health-policy-properties-056d5b4aa63312c9.yaml delete mode 100644 releasenotes/notes/health-policy-suspend-7aa33fc981c0f2c9.yaml delete mode 100644 releasenotes/notes/health-poll-url-236392171bb28b3f.yaml delete mode 100644 releasenotes/notes/health-poll-url-detection-c6f10065a076510dr.yaml delete mode 100644 releasenotes/notes/health-reboot-9f74c263f7fb6767.yaml delete mode 100644 releasenotes/notes/health-recover-9aecfbf2d799abfb.yaml delete mode 100644 releasenotes/notes/heat-listener-b908d0988840e1f3.yaml delete mode 100644 releasenotes/notes/keystone-conformance-4e729da9e88b4fb3.yaml delete mode 100644 releasenotes/notes/kube-token-gen-673ea5c0d26d6872.yaml delete mode 100644 releasenotes/notes/kubernetes-dependents-1d7a70aa43ee8aa4.yaml delete mode 100644 releasenotes/notes/lb-name-instead-id-f30d4f4e05d350cb.yaml delete mode 100644 releasenotes/notes/lb-node-actions-95545338ae622f5c.yaml delete mode 100644 releasenotes/notes/lb-policy-02782a1b98142742.yaml delete mode 100644 releasenotes/notes/lb-policy-improve-165680731fb76681.yaml delete mode 100644 releasenotes/notes/lb-policy-improvement-2c18577717d28bb5.yaml delete mode 100644 releasenotes/notes/lb-project-restriction-688833a1aec6f04e.yaml delete mode 100644 releasenotes/notes/lb-support-to-recover-8f822d3c2665e225.yaml delete mode 100644 releasenotes/notes/lb-timeout-option-990ba1f359b5daab.yaml delete mode 100644 releasenotes/notes/lifecycle-hook-19a9bf85b534107d.yaml delete mode 100644 releasenotes/notes/loadbalancer-octavia-8ab8be9f703781d1.yaml delete mode 100644 releasenotes/notes/lock-break-for-dead-service-0abd3d3ea333622c.yaml delete mode 100644 releasenotes/notes/lock-retry-4d1c52ff4d42a3f9.yaml delete mode 100644 releasenotes/notes/lock-retry-ab31681e74997cf9.yaml delete mode 100644 releasenotes/notes/message-receiver-3432826515f8e70c.yaml delete mode 100644 releasenotes/notes/message-topic-7c642cff317f2bc7.yaml delete mode 100644 releasenotes/notes/metadata-query-profile-9c45d99db7b30207.yaml delete mode 100644 releasenotes/notes/more-policy-validation-ace6a4f890b2a500.yaml delete mode 100644 releasenotes/notes/more-server-operations-dd77e83b705c28f0.yaml delete mode 100644 releasenotes/notes/new-api-doc-f21eb0a9f53d7643.yaml delete mode 100644 releasenotes/notes/new-config-options-a963e5841d35ef03.yaml delete mode 100644 releasenotes/notes/new-node-create-08fe53674b0baab2.yaml delete mode 100644 releasenotes/notes/node-action-logic-4d3e94818cccaa3e.yaml delete mode 100644 releasenotes/notes/node-adopt-289a3cea24d8eb78.yaml delete mode 100644 releasenotes/notes/node-check-50d4b67796e17afb.yaml delete mode 100644 releasenotes/notes/node-check-before-recover-abf887a39ab0d355.yaml delete mode 100644 releasenotes/notes/node-create-affinity-ec126ccd3e9e0957.yaml delete mode 100644 releasenotes/notes/node-create-az-d886dea98a25229f.yaml delete mode 100644 releasenotes/notes/node-create-region-0cbac0918c703e27.yaml delete mode 
100644 releasenotes/notes/node-delete-force-e4a69831af0b145d.yaml delete mode 100644 releasenotes/notes/node-detail-volumes-8e29c734f4f43442.yaml delete mode 100644 releasenotes/notes/node-health-check-0c94b9fecf35e677.yaml delete mode 100644 releasenotes/notes/node-join-leave-8b00f64cf55b675a.yaml delete mode 100644 releasenotes/notes/node-name-formatter-284b768be7fbe6c6.yaml delete mode 100644 releasenotes/notes/node-op-api-a7bede34c51854ee.yaml delete mode 100644 releasenotes/notes/node-op-return-value-73720cf91b6e2672.yaml delete mode 100644 releasenotes/notes/node-ops-115d9d64f6e261db.yaml delete mode 100644 releasenotes/notes/node-physical-id-f3393fb1a1eba4f7.yaml delete mode 100644 releasenotes/notes/node-recover-ace5311e23030f20.yaml delete mode 100644 releasenotes/notes/node-recover-fix-cc054c3f763654a0.yaml delete mode 100644 releasenotes/notes/node-role-fix-211d1536dd66066d.yaml delete mode 100644 releasenotes/notes/node-tainted-1d1c0f885cd3e4a8.yaml delete mode 100644 releasenotes/notes/node-update-timestamp-43b9639e22267598.yaml delete mode 100644 releasenotes/notes/non-operation-recover-cf0f3c0ac62bb0f3.yaml delete mode 100644 releasenotes/notes/notification-operations-c7bdaa9b56e5011f.yaml delete mode 100644 releasenotes/notes/notification-retry-logic-cb9933b4826c9d45.yaml delete mode 100644 releasenotes/notes/notification-support-a7e2ebc816bb4009.yaml delete mode 100644 releasenotes/notes/notification-transport-ae49e9cb1813cd96.yaml delete mode 100644 releasenotes/notes/nova-az-fccf8db758642d34.yaml delete mode 100644 releasenotes/notes/nova-get-image-726aa195c17a294f.yaml delete mode 100644 releasenotes/notes/nova-metadata-fix-89b7a2e06c3ce59f.yaml delete mode 100644 releasenotes/notes/nova-metadata-update-d1ab297f0e998117.yaml delete mode 100644 releasenotes/notes/nova-server-addresses-fd8afddc3fb36a0c.yaml delete mode 100644 releasenotes/notes/nova-server-validation-60612c1185738104.yaml delete mode 100644 releasenotes/notes/nova-server-validation-d36dbcf64fb90a43.yaml delete mode 100644 releasenotes/notes/nova-update-opt-7372e4d189e483aa.yaml delete mode 100644 releasenotes/notes/nova-update-validation-dca7de984c2071d1.yaml delete mode 100644 releasenotes/notes/ocata-2-c2e184a0b76231e8.yaml delete mode 100644 releasenotes/notes/octavia-network_id-and-subnet_id-changes-9ba43e19ae29ac7d.yaml delete mode 100644 releasenotes/notes/options-shuffled-29c6cfac72aaf8ff.yaml delete mode 100644 releasenotes/notes/oslo-versioned-object-support-cc9463490306c26f.yaml delete mode 100644 releasenotes/notes/param-check-cluster-update-58d4712a33f74c6e.yaml delete mode 100644 releasenotes/notes/path-check-collect-1e542762cbcd65d2.yaml delete mode 100644 releasenotes/notes/policy-enabling-61d0c38aecf314eb.yaml delete mode 100644 releasenotes/notes/policy-fixes-24857037ac054999.yaml delete mode 100644 releasenotes/notes/policy-in-code-05970b66eb27481a.yaml delete mode 100644 releasenotes/notes/policy-performance-4d2fa57ccc45bbf1.yaml delete mode 100644 releasenotes/notes/policy-retry-251cf15f06368ad4.yaml delete mode 100644 releasenotes/notes/policy-validate-04cbc74d2c025fcc.yaml delete mode 100644 releasenotes/notes/policy-validation-477a103aa83835f9.yaml delete mode 100644 releasenotes/notes/profile-only-update-5cdb3ae46a8139a8.yaml delete mode 100644 releasenotes/notes/profile-type-ops-1f0f2e6e6b5b1999.yaml delete mode 100644 releasenotes/notes/profile-validate-45a9bc520880bc6b.yaml delete mode 100644 releasenotes/notes/receiver-create-71ae7367427bf81c.yaml delete mode 100644 
releasenotes/notes/receiver-create-check-2225f536f5150065.yaml delete mode 100644 releasenotes/notes/receiver-create-trust-bd5fdeb059e68330.yaml delete mode 100644 releasenotes/notes/receiver-filter-by-user-ab35a2ab8e2690d1.yaml delete mode 100644 releasenotes/notes/receiver-update-f97dc556ce3bf22e.yaml delete mode 100644 releasenotes/notes/receiver-webhook-d972369731a6ed72.yaml delete mode 100644 releasenotes/notes/receiver-webhook-v2-a7a24ae6720b5151.yaml delete mode 100644 releasenotes/notes/remove-bdm-v1-4533677f3bca3c5d.yaml delete mode 100644 releasenotes/notes/remove-py35-test-bc81b608d6afeb4a.yaml delete mode 100644 releasenotes/notes/requirement-update-941ebb5825ee9f29.yaml delete mode 100644 releasenotes/notes/requirement-update-victoria-3b150cddd189db7d.yaml delete mode 100644 releasenotes/notes/resize-params-ab4942dc11f05d9a.yaml delete mode 100644 releasenotes/notes/scaling-policy-validation-e2a1d3049e03c316.yaml delete mode 100644 releasenotes/notes/schedule-improved-6996965f07450b35.yaml delete mode 100644 releasenotes/notes/scheduler-enhancement-09f86efe4dde4051.yaml delete mode 100644 releasenotes/notes/scheduler-thread-pool-size-40905866197ef8bd.yaml delete mode 100644 releasenotes/notes/secure-password-e60243ae2befbbf6.yaml delete mode 100644 releasenotes/notes/senlin-osprofiler-fc8cb7161bdb1a6e.yaml delete mode 100644 releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml delete mode 100644 releasenotes/notes/server-image-id-27c1619fa818c6a0.yaml delete mode 100644 releasenotes/notes/service-cleanup-afacddfacd7b4dcd.yaml delete mode 100644 releasenotes/notes/service-list-5f4037ae52514f2a.yaml delete mode 100644 releasenotes/notes/service-status-report-625bc25b89907e07.yaml delete mode 100644 releasenotes/notes/service-update-2e96dd86295ddfa0.yaml delete mode 100644 releasenotes/notes/setup-script-648e9bfb89bb6255.yaml delete mode 100644 releasenotes/notes/skip-lifecycle-completion-b528464e11071666.yaml delete mode 100644 releasenotes/notes/split-engine-service-acea7821cadf9d00.yaml delete mode 100644 releasenotes/notes/support-status-f7383a53ddcae908.yaml delete mode 100644 releasenotes/notes/support-subnet-c2492ce8a377b1af.yaml delete mode 100644 releasenotes/notes/support-volume-type-07d608097c711460.yaml delete mode 100644 releasenotes/notes/switch-to-alembic-migrations-f442d0b58c3f13a6.yaml delete mode 100644 releasenotes/notes/tempest-api-test-support-c86091a7ba5fb789.yaml delete mode 100644 releasenotes/notes/tempest-functional-test-383dad4d9acff97e.yaml delete mode 100644 releasenotes/notes/template-url-19075b68d9a35a80.yaml delete mode 100644 releasenotes/notes/test-python3-train-253c0e054dd9d1e3.yaml delete mode 100644 releasenotes/notes/test-python3-victoria-ec16705d40a167c0.yaml delete mode 100644 releasenotes/notes/timestamp-datatype-86c0e47debffa919.yaml delete mode 100644 releasenotes/notes/tools-setup-d73e3298328c5355.yaml delete mode 100644 releasenotes/notes/trigger-version-af674cfe0f4693cd.yaml delete mode 100644 releasenotes/notes/unicode-az-ee5ea4346b36eefb.yaml delete mode 100644 releasenotes/notes/unicode-cluster-name-3bd5b6eeac2566f1.yaml delete mode 100644 releasenotes/notes/versioned-rpc-requests-2df5d878c279e933.yaml delete mode 100644 releasenotes/notes/vm-lock-unlock-da4c3095575c9c94.yaml delete mode 100644 releasenotes/notes/vm-migrate-6c6adee51ee8ed24.yaml delete mode 100644 releasenotes/notes/vm-pause-unpause-3e414ce4d86c7ed3.yaml delete mode 100644 releasenotes/notes/vm-rescue-unrescue-f56047419c50e957.yaml delete 
mode 100644 releasenotes/notes/vm-start-stop-e590e25a04fff1e0.yaml delete mode 100644 releasenotes/notes/vm-suspend-resume-a4398520255e6bbd.yaml delete mode 100644 releasenotes/notes/webhook-fix-792322c0b7f374aa.yaml delete mode 100644 releasenotes/notes/zaqar-support-470e824b7737e939.yaml delete mode 100644 releasenotes/source/2023.1.rst delete mode 100644 releasenotes/source/2023.2.rst delete mode 100644 releasenotes/source/_templates/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst delete mode 100644 releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po delete mode 100644 releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po delete mode 100644 releasenotes/source/locale/zh_CN/LC_MESSAGES/releasenotes.po delete mode 100644 releasenotes/source/mitaka.rst delete mode 100644 releasenotes/source/newton.rst delete mode 100644 releasenotes/source/ocata.rst delete mode 100644 releasenotes/source/pike.rst delete mode 100644 releasenotes/source/queens.rst delete mode 100644 releasenotes/source/rocky.rst delete mode 100644 releasenotes/source/stein.rst delete mode 100644 releasenotes/source/train.rst delete mode 100644 releasenotes/source/unreleased.rst delete mode 100644 releasenotes/source/ussuri.rst delete mode 100644 releasenotes/source/victoria.rst delete mode 100644 releasenotes/source/wallaby.rst delete mode 100644 releasenotes/source/xena.rst delete mode 100644 releasenotes/source/yoga.rst delete mode 100644 releasenotes/source/zed.rst delete mode 100644 requirements.txt delete mode 100644 senlin/__init__.py delete mode 100644 senlin/api/__init__.py delete mode 100644 senlin/api/common/__init__.py delete mode 100644 senlin/api/common/serializers.py delete mode 100644 senlin/api/common/util.py delete mode 100644 senlin/api/common/version_request.py delete mode 100644 senlin/api/common/versioned_method.py delete mode 100644 senlin/api/common/wsgi.py delete mode 100644 senlin/api/middleware/__init__.py delete mode 100644 senlin/api/middleware/context.py delete mode 100644 senlin/api/middleware/fault.py delete mode 100644 senlin/api/middleware/trust.py delete mode 100644 senlin/api/middleware/version_negotiation.py delete mode 100644 senlin/api/middleware/webhook.py delete mode 100644 senlin/api/openstack/__init__.py delete mode 100755 senlin/api/openstack/history.rst delete mode 100644 senlin/api/openstack/v1/__init__.py delete mode 100644 senlin/api/openstack/v1/actions.py delete mode 100644 senlin/api/openstack/v1/build_info.py delete mode 100644 senlin/api/openstack/v1/cluster_policies.py delete mode 100644 senlin/api/openstack/v1/clusters.py delete mode 100644 senlin/api/openstack/v1/events.py delete mode 100644 senlin/api/openstack/v1/nodes.py delete mode 100644 senlin/api/openstack/v1/policies.py delete mode 100644 senlin/api/openstack/v1/policy_types.py delete mode 100644 senlin/api/openstack/v1/profile_types.py delete mode 100644 senlin/api/openstack/v1/profiles.py delete mode 100644 senlin/api/openstack/v1/receivers.py delete mode 100644 senlin/api/openstack/v1/router.py delete mode 100644 senlin/api/openstack/v1/services.py delete mode 100644 senlin/api/openstack/v1/version.py delete mode 100644 senlin/api/openstack/v1/webhooks.py delete mode 100644 senlin/api/openstack/versions.py delete mode 100644 senlin/cmd/__init__.py delete mode 100644 senlin/cmd/api.py delete mode 100644 senlin/cmd/api_wsgi.py delete mode 100644 senlin/cmd/conductor.py delete mode 100644 senlin/cmd/engine.py delete mode 100644 
senlin/cmd/health_manager.py delete mode 100644 senlin/cmd/manage.py delete mode 100644 senlin/cmd/status.py delete mode 100644 senlin/common/__init__.py delete mode 100644 senlin/common/config.py delete mode 100644 senlin/common/constraints.py delete mode 100644 senlin/common/consts.py delete mode 100644 senlin/common/context.py delete mode 100644 senlin/common/exception.py delete mode 100644 senlin/common/i18n.py delete mode 100644 senlin/common/messaging.py delete mode 100644 senlin/common/policies/__init__.py delete mode 100644 senlin/common/policies/actions.py delete mode 100644 senlin/common/policies/base.py delete mode 100644 senlin/common/policies/build_info.py delete mode 100644 senlin/common/policies/cluster_policies.py delete mode 100644 senlin/common/policies/clusters.py delete mode 100644 senlin/common/policies/events.py delete mode 100644 senlin/common/policies/nodes.py delete mode 100644 senlin/common/policies/policies.py delete mode 100644 senlin/common/policies/policy_types.py delete mode 100644 senlin/common/policies/profile_types.py delete mode 100644 senlin/common/policies/profiles.py delete mode 100644 senlin/common/policies/receivers.py delete mode 100644 senlin/common/policies/services.py delete mode 100644 senlin/common/policies/webhooks.py delete mode 100644 senlin/common/policy.py delete mode 100644 senlin/common/profiler.py delete mode 100644 senlin/common/scaleutils.py delete mode 100644 senlin/common/schema.py delete mode 100644 senlin/common/service.py delete mode 100644 senlin/common/utils.py delete mode 100644 senlin/conductor/__init__.py delete mode 100644 senlin/conductor/service.py delete mode 100644 senlin/conf/__init__.py delete mode 100644 senlin/conf/api.py delete mode 100644 senlin/conf/authentication.py delete mode 100644 senlin/conf/base.py delete mode 100644 senlin/conf/conductor.py delete mode 100644 senlin/conf/dispatchers.py delete mode 100644 senlin/conf/engine.py delete mode 100644 senlin/conf/health_manager.py delete mode 100644 senlin/conf/notification.py delete mode 100644 senlin/conf/opts.py delete mode 100644 senlin/conf/receiver.py delete mode 100644 senlin/conf/revision.py delete mode 100644 senlin/conf/zaqar.py delete mode 100644 senlin/db/__init__.py delete mode 100644 senlin/db/api.py delete mode 100644 senlin/db/sqlalchemy/__init__.py delete mode 100644 senlin/db/sqlalchemy/alembic.ini delete mode 100644 senlin/db/sqlalchemy/alembic/README delete mode 100644 senlin/db/sqlalchemy/alembic/__init__.py delete mode 100644 senlin/db/sqlalchemy/alembic/env.py delete mode 100644 senlin/db/sqlalchemy/alembic/legacy_utils.py delete mode 100644 senlin/db/sqlalchemy/alembic/script.py.mako delete mode 100644 senlin/db/sqlalchemy/alembic/versions/004f8202c264_action_clusterid.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/0c04e812f224_user_project_length.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/3a04debb8cb1_cluster_config.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/569eb0b8_first_version.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/5b7cb185e0a5_registry_enable.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/662f8e74ac6f_event_column_name.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/6f73af60_service_table.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/9dbb563afc4d_node_cluster_dependents_column.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/aaa7e7755feb_node_tainted.py delete mode 100644 
senlin/db/sqlalchemy/alembic/versions/ab7b23c67360_health_registry.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/beffe13cf8e5_action_starttime_endtime_type.py delete mode 100644 senlin/db/sqlalchemy/alembic/versions/c3e2bfa76dea_action_tenant.py delete mode 100644 senlin/db/sqlalchemy/api.py delete mode 100644 senlin/db/sqlalchemy/migration.py delete mode 100644 senlin/db/sqlalchemy/models.py delete mode 100644 senlin/db/sqlalchemy/types.py delete mode 100644 senlin/db/sqlalchemy/utils.py delete mode 100644 senlin/drivers/__init__.py delete mode 100644 senlin/drivers/base.py delete mode 100644 senlin/drivers/container/__init__.py delete mode 100644 senlin/drivers/container/docker_v1.py delete mode 100644 senlin/drivers/os/__init__.py delete mode 100644 senlin/drivers/os/cinder_v2.py delete mode 100644 senlin/drivers/os/glance_v2.py delete mode 100644 senlin/drivers/os/heat_v1.py delete mode 100644 senlin/drivers/os/keystone_v3.py delete mode 100644 senlin/drivers/os/lbaas.py delete mode 100644 senlin/drivers/os/mistral_v2.py delete mode 100644 senlin/drivers/os/neutron_v2.py delete mode 100644 senlin/drivers/os/nova_v2.py delete mode 100644 senlin/drivers/os/octavia_v2.py delete mode 100644 senlin/drivers/os/zaqar_v2.py delete mode 100644 senlin/drivers/sdk.py delete mode 100644 senlin/engine/__init__.py delete mode 100644 senlin/engine/actions/__init__.py delete mode 100644 senlin/engine/actions/base.py delete mode 100644 senlin/engine/actions/cluster_action.py delete mode 100644 senlin/engine/actions/custom_action.py delete mode 100644 senlin/engine/actions/node_action.py delete mode 100644 senlin/engine/cluster.py delete mode 100644 senlin/engine/cluster_policy.py delete mode 100644 senlin/engine/dispatcher.py delete mode 100644 senlin/engine/environment.py delete mode 100644 senlin/engine/event.py delete mode 100644 senlin/engine/health_manager.py delete mode 100644 senlin/engine/node.py delete mode 100644 senlin/engine/notifications/__init__.py delete mode 100644 senlin/engine/notifications/base.py delete mode 100644 senlin/engine/notifications/heat_endpoint.py delete mode 100644 senlin/engine/notifications/message.py delete mode 100644 senlin/engine/notifications/nova_endpoint.py delete mode 100644 senlin/engine/parser.py delete mode 100644 senlin/engine/receivers/__init__.py delete mode 100644 senlin/engine/receivers/base.py delete mode 100644 senlin/engine/receivers/message.py delete mode 100644 senlin/engine/receivers/webhook.py delete mode 100644 senlin/engine/registry.py delete mode 100644 senlin/engine/senlin_lock.py delete mode 100644 senlin/engine/service.py delete mode 100644 senlin/events/__init__.py delete mode 100644 senlin/events/base.py delete mode 100644 senlin/events/database.py delete mode 100644 senlin/events/message.py delete mode 100644 senlin/hacking/__init__.py delete mode 100644 senlin/hacking/checks.py delete mode 100644 senlin/health_manager/__init__.py delete mode 100644 senlin/health_manager/service.py delete mode 100644 senlin/locale/de/LC_MESSAGES/senlin.po delete mode 100644 senlin/objects/__init__.py delete mode 100644 senlin/objects/action.py delete mode 100644 senlin/objects/base.py delete mode 100644 senlin/objects/cluster.py delete mode 100644 senlin/objects/cluster_lock.py delete mode 100644 senlin/objects/cluster_policy.py delete mode 100644 senlin/objects/credential.py delete mode 100644 senlin/objects/dependency.py delete mode 100644 senlin/objects/event.py delete mode 100644 senlin/objects/fields.py delete mode 100644 
senlin/objects/health_registry.py delete mode 100644 senlin/objects/node.py delete mode 100644 senlin/objects/node_lock.py delete mode 100644 senlin/objects/notification.py delete mode 100644 senlin/objects/policy.py delete mode 100644 senlin/objects/profile.py delete mode 100644 senlin/objects/receiver.py delete mode 100644 senlin/objects/requests/__init__.py delete mode 100644 senlin/objects/requests/actions.py delete mode 100644 senlin/objects/requests/build_info.py delete mode 100644 senlin/objects/requests/cluster_policies.py delete mode 100644 senlin/objects/requests/clusters.py delete mode 100644 senlin/objects/requests/credentials.py delete mode 100644 senlin/objects/requests/events.py delete mode 100644 senlin/objects/requests/nodes.py delete mode 100644 senlin/objects/requests/policies.py delete mode 100644 senlin/objects/requests/policy_type.py delete mode 100644 senlin/objects/requests/profile_type.py delete mode 100644 senlin/objects/requests/profiles.py delete mode 100644 senlin/objects/requests/receivers.py delete mode 100644 senlin/objects/requests/webhooks.py delete mode 100644 senlin/objects/service.py delete mode 100644 senlin/policies/__init__.py delete mode 100644 senlin/policies/affinity_policy.py delete mode 100644 senlin/policies/base.py delete mode 100644 senlin/policies/batch_policy.py delete mode 100644 senlin/policies/deletion_policy.py delete mode 100644 senlin/policies/health_policy.py delete mode 100644 senlin/policies/lb_policy.py delete mode 100644 senlin/policies/region_placement.py delete mode 100644 senlin/policies/scaling_policy.py delete mode 100644 senlin/policies/zone_placement.py delete mode 100644 senlin/profiles/__init__.py delete mode 100644 senlin/profiles/base.py delete mode 100644 senlin/profiles/container/__init__.py delete mode 100644 senlin/profiles/container/docker.py delete mode 100644 senlin/profiles/os/__init__.py delete mode 100644 senlin/profiles/os/heat/__init__.py delete mode 100644 senlin/profiles/os/heat/stack.py delete mode 100644 senlin/profiles/os/nova/__init__.py delete mode 100644 senlin/profiles/os/nova/server.py delete mode 100644 senlin/rpc/__init__.py delete mode 100644 senlin/rpc/client.py delete mode 100644 senlin/tests/__init__.py delete mode 100644 senlin/tests/drivers/__init__.py delete mode 100644 senlin/tests/drivers/os_test/README.rst delete mode 100644 senlin/tests/drivers/os_test/__init__.py delete mode 100644 senlin/tests/drivers/os_test/cinder_v2.py delete mode 100644 senlin/tests/drivers/os_test/glance_v2.py delete mode 100644 senlin/tests/drivers/os_test/heat_v1.py delete mode 100644 senlin/tests/drivers/os_test/keystone_v3.py delete mode 100644 senlin/tests/drivers/os_test/lbaas.py delete mode 100644 senlin/tests/drivers/os_test/mistral_v2.py delete mode 100644 senlin/tests/drivers/os_test/neutron_v2.py delete mode 100644 senlin/tests/drivers/os_test/nova_v2.py delete mode 100644 senlin/tests/drivers/os_test/octavia_v2.py delete mode 100644 senlin/tests/drivers/os_test/zaqar_v2.py delete mode 100644 senlin/tests/unit/__init__.py delete mode 100644 senlin/tests/unit/api/__init__.py delete mode 100644 senlin/tests/unit/api/common/__init__.py delete mode 100644 senlin/tests/unit/api/common/test_serializers.py delete mode 100644 senlin/tests/unit/api/common/test_util.py delete mode 100644 senlin/tests/unit/api/common/test_version_request.py delete mode 100644 senlin/tests/unit/api/common/test_wsgi.py delete mode 100644 senlin/tests/unit/api/middleware/__init__.py delete mode 100644 
senlin/tests/unit/api/middleware/policy/check_admin.json delete mode 100644 senlin/tests/unit/api/middleware/policy/notallowed.json delete mode 100644 senlin/tests/unit/api/middleware/test_context.py delete mode 100644 senlin/tests/unit/api/middleware/test_fault.py delete mode 100644 senlin/tests/unit/api/middleware/test_middleware_filters.py delete mode 100644 senlin/tests/unit/api/middleware/test_trust.py delete mode 100644 senlin/tests/unit/api/middleware/test_version_negotiation.py delete mode 100644 senlin/tests/unit/api/middleware/test_webhook.py delete mode 100644 senlin/tests/unit/api/openstack/__init__.py delete mode 100644 senlin/tests/unit/api/openstack/test_versions.py delete mode 100644 senlin/tests/unit/api/openstack/v1/__init__.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_actions.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_buildinfo.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_cluster_policies.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_clusters.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_events.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_nodes.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_policies.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_policy_types.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_profile_types.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_profiles.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_receivers.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_router.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_services.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_version.py delete mode 100644 senlin/tests/unit/api/openstack/v1/test_webhooks.py delete mode 100644 senlin/tests/unit/api/shared.py delete mode 100644 senlin/tests/unit/cmd/__init__.py delete mode 100644 senlin/tests/unit/cmd/test_conductor.py delete mode 100644 senlin/tests/unit/cmd/test_engine.py delete mode 100644 senlin/tests/unit/cmd/test_health_manager.py delete mode 100644 senlin/tests/unit/cmd/test_status.py delete mode 100644 senlin/tests/unit/common/__init__.py delete mode 100644 senlin/tests/unit/common/base.py delete mode 100644 senlin/tests/unit/common/utils.py delete mode 100644 senlin/tests/unit/conductor/__init__.py delete mode 100644 senlin/tests/unit/conductor/service/__init__.py delete mode 100644 senlin/tests/unit/conductor/service/test_actions.py delete mode 100644 senlin/tests/unit/conductor/service/test_cluster_op.py delete mode 100644 senlin/tests/unit/conductor/service/test_cluster_policies.py delete mode 100644 senlin/tests/unit/conductor/service/test_clusters.py delete mode 100644 senlin/tests/unit/conductor/service/test_credentials.py delete mode 100644 senlin/tests/unit/conductor/service/test_events.py delete mode 100644 senlin/tests/unit/conductor/service/test_nodes.py delete mode 100644 senlin/tests/unit/conductor/service/test_policies.py delete mode 100644 senlin/tests/unit/conductor/service/test_policy_types.py delete mode 100644 senlin/tests/unit/conductor/service/test_profile_types.py delete mode 100644 senlin/tests/unit/conductor/service/test_profiles.py delete mode 100644 senlin/tests/unit/conductor/service/test_receivers.py delete mode 100644 senlin/tests/unit/conductor/service/test_webhooks.py delete mode 100644 senlin/tests/unit/conductor/test_service.py delete mode 100644 senlin/tests/unit/db/__init__.py delete mode 100644 
senlin/tests/unit/db/shared.py delete mode 100644 senlin/tests/unit/db/test_action_api.py delete mode 100644 senlin/tests/unit/db/test_cluster_api.py delete mode 100644 senlin/tests/unit/db/test_cluster_policy_api.py delete mode 100644 senlin/tests/unit/db/test_cred_api.py delete mode 100644 senlin/tests/unit/db/test_event_api.py delete mode 100644 senlin/tests/unit/db/test_lock_api.py delete mode 100644 senlin/tests/unit/db/test_migration.py delete mode 100644 senlin/tests/unit/db/test_node_api.py delete mode 100644 senlin/tests/unit/db/test_policy_api.py delete mode 100644 senlin/tests/unit/db/test_profile_api.py delete mode 100644 senlin/tests/unit/db/test_receiver_api.py delete mode 100644 senlin/tests/unit/db/test_registry_api.py delete mode 100644 senlin/tests/unit/db/test_service_api.py delete mode 100644 senlin/tests/unit/db/test_sqlalchemy_types.py delete mode 100644 senlin/tests/unit/db/test_sqlalchemy_utils.py delete mode 100644 senlin/tests/unit/drivers/__init__.py delete mode 100644 senlin/tests/unit/drivers/test_cinder_v2.py delete mode 100644 senlin/tests/unit/drivers/test_docker_v1.py delete mode 100644 senlin/tests/unit/drivers/test_driver.py delete mode 100644 senlin/tests/unit/drivers/test_glance_v2.py delete mode 100644 senlin/tests/unit/drivers/test_heat_v1.py delete mode 100644 senlin/tests/unit/drivers/test_keystone_v3.py delete mode 100644 senlin/tests/unit/drivers/test_lbaas.py delete mode 100644 senlin/tests/unit/drivers/test_mistral_v2.py delete mode 100644 senlin/tests/unit/drivers/test_neutron_v2.py delete mode 100644 senlin/tests/unit/drivers/test_nova_v2.py delete mode 100644 senlin/tests/unit/drivers/test_octavia_v2.py delete mode 100644 senlin/tests/unit/drivers/test_sdk.py delete mode 100644 senlin/tests/unit/drivers/test_zaqar_v2.py delete mode 100644 senlin/tests/unit/engine/__init__.py delete mode 100644 senlin/tests/unit/engine/actions/__init__.py delete mode 100644 senlin/tests/unit/engine/actions/test_action_base.py delete mode 100644 senlin/tests/unit/engine/actions/test_add_nodes.py delete mode 100644 senlin/tests/unit/engine/actions/test_attach_policy.py delete mode 100644 senlin/tests/unit/engine/actions/test_check.py delete mode 100644 senlin/tests/unit/engine/actions/test_cluster_action.py delete mode 100644 senlin/tests/unit/engine/actions/test_create.py delete mode 100644 senlin/tests/unit/engine/actions/test_custom_action.py delete mode 100644 senlin/tests/unit/engine/actions/test_del_nodes.py delete mode 100644 senlin/tests/unit/engine/actions/test_delete.py delete mode 100644 senlin/tests/unit/engine/actions/test_node_action.py delete mode 100644 senlin/tests/unit/engine/actions/test_operation.py delete mode 100644 senlin/tests/unit/engine/actions/test_recover.py delete mode 100644 senlin/tests/unit/engine/actions/test_replace_nodes.py delete mode 100644 senlin/tests/unit/engine/actions/test_resize.py delete mode 100644 senlin/tests/unit/engine/actions/test_scale_in.py delete mode 100644 senlin/tests/unit/engine/actions/test_scale_out.py delete mode 100644 senlin/tests/unit/engine/actions/test_update.py delete mode 100644 senlin/tests/unit/engine/actions/test_update_policy.py delete mode 100644 senlin/tests/unit/engine/actions/test_wait.py delete mode 100644 senlin/tests/unit/engine/notifications/__init__.py delete mode 100644 senlin/tests/unit/engine/notifications/test_heat_endpoint.py delete mode 100644 senlin/tests/unit/engine/notifications/test_message.py delete mode 100644 senlin/tests/unit/engine/notifications/test_nova_endpoint.py 
delete mode 100644 senlin/tests/unit/engine/receivers/__init__.py delete mode 100644 senlin/tests/unit/engine/receivers/test_message.py delete mode 100644 senlin/tests/unit/engine/receivers/test_receiver.py delete mode 100644 senlin/tests/unit/engine/receivers/test_webhook.py delete mode 100644 senlin/tests/unit/engine/test_cluster.py delete mode 100644 senlin/tests/unit/engine/test_cluster_policy.py delete mode 100644 senlin/tests/unit/engine/test_engine_parser.py delete mode 100644 senlin/tests/unit/engine/test_environment.py delete mode 100644 senlin/tests/unit/engine/test_event.py delete mode 100644 senlin/tests/unit/engine/test_health_manager.py delete mode 100644 senlin/tests/unit/engine/test_node.py delete mode 100644 senlin/tests/unit/engine/test_registry.py delete mode 100644 senlin/tests/unit/engine/test_senlin_lock.py delete mode 100644 senlin/tests/unit/engine/test_service.py delete mode 100644 senlin/tests/unit/events/__init__.py delete mode 100644 senlin/tests/unit/events/test_base.py delete mode 100644 senlin/tests/unit/events/test_database.py delete mode 100644 senlin/tests/unit/events/test_message.py delete mode 100644 senlin/tests/unit/fakes.py delete mode 100644 senlin/tests/unit/health_manager/__init__.py delete mode 100644 senlin/tests/unit/health_manager/test_service.py delete mode 100644 senlin/tests/unit/objects/__init__.py delete mode 100644 senlin/tests/unit/objects/requests/__init__.py delete mode 100644 senlin/tests/unit/objects/requests/test_actions.py delete mode 100644 senlin/tests/unit/objects/requests/test_cluster_policies.py delete mode 100644 senlin/tests/unit/objects/requests/test_clusters.py delete mode 100644 senlin/tests/unit/objects/requests/test_credentials.py delete mode 100644 senlin/tests/unit/objects/requests/test_events.py delete mode 100644 senlin/tests/unit/objects/requests/test_nodes.py delete mode 100644 senlin/tests/unit/objects/requests/test_policies.py delete mode 100644 senlin/tests/unit/objects/requests/test_policy_type.py delete mode 100644 senlin/tests/unit/objects/requests/test_profile_type.py delete mode 100644 senlin/tests/unit/objects/requests/test_profiles.py delete mode 100644 senlin/tests/unit/objects/requests/test_receivers.py delete mode 100644 senlin/tests/unit/objects/requests/test_webhooks.py delete mode 100644 senlin/tests/unit/objects/test_action.py delete mode 100644 senlin/tests/unit/objects/test_base.py delete mode 100644 senlin/tests/unit/objects/test_cluster.py delete mode 100644 senlin/tests/unit/objects/test_event.py delete mode 100644 senlin/tests/unit/objects/test_fields.py delete mode 100644 senlin/tests/unit/objects/test_health_registry.py delete mode 100644 senlin/tests/unit/objects/test_node.py delete mode 100644 senlin/tests/unit/objects/test_notification.py delete mode 100644 senlin/tests/unit/objects/test_policy.py delete mode 100644 senlin/tests/unit/objects/test_profile.py delete mode 100644 senlin/tests/unit/objects/test_receiver.py delete mode 100644 senlin/tests/unit/policies/__init__.py delete mode 100644 senlin/tests/unit/policies/test_affinity.py delete mode 100644 senlin/tests/unit/policies/test_batch_policy.py delete mode 100644 senlin/tests/unit/policies/test_deletion_policy.py delete mode 100644 senlin/tests/unit/policies/test_health_policy.py delete mode 100644 senlin/tests/unit/policies/test_lb_policy.py delete mode 100644 senlin/tests/unit/policies/test_policy.py delete mode 100644 senlin/tests/unit/policies/test_region_placement.py delete mode 100644 
senlin/tests/unit/policies/test_scaling_policy.py delete mode 100644 senlin/tests/unit/policies/test_zone_placement.py delete mode 100644 senlin/tests/unit/profiles/__init__.py delete mode 100644 senlin/tests/unit/profiles/test_container_docker.py delete mode 100644 senlin/tests/unit/profiles/test_heat_stack.py delete mode 100644 senlin/tests/unit/profiles/test_nova_server.py delete mode 100644 senlin/tests/unit/profiles/test_nova_server_update.py delete mode 100644 senlin/tests/unit/profiles/test_nova_server_validate.py delete mode 100644 senlin/tests/unit/profiles/test_profile_base.py delete mode 100644 senlin/tests/unit/test_common_constraints.py delete mode 100644 senlin/tests/unit/test_common_context.py delete mode 100644 senlin/tests/unit/test_common_exception.py delete mode 100644 senlin/tests/unit/test_common_messaging.py delete mode 100644 senlin/tests/unit/test_common_policy.py delete mode 100644 senlin/tests/unit/test_common_scaleutils.py delete mode 100644 senlin/tests/unit/test_common_schema.py delete mode 100644 senlin/tests/unit/test_common_utils.py delete mode 100644 senlin/tests/unit/test_conf.py delete mode 100644 senlin/tests/unit/test_hacking.py delete mode 100644 senlin/tests/unit/test_rpc_client.py delete mode 100644 senlin/version.py delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 test-requirements.txt delete mode 100644 tools/README.rst delete mode 100644 tools/config-generator.conf delete mode 100755 tools/gen-config delete mode 100755 tools/gen-policy delete mode 100755 tools/gen-pot-files delete mode 100644 tools/policy-generator.conf delete mode 100755 tools/senlin-db-recreate delete mode 100755 tools/setup-service delete mode 100644 tox.ini delete mode 100755 uninstall.sh diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 94325d02e..000000000 --- a/.coveragerc +++ /dev/null @@ -1,9 +0,0 @@ -[run] -branch = True -source = senlin -omit = senlin/tests/*,senlin/hacking/* -concurrency = greenlet - -[report] -ignore_errors = True - diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 00ae09a3c..000000000 --- a/.gitignore +++ /dev/null @@ -1,30 +0,0 @@ -AUTHORS -ChangeLog -build -cover -cover-master -dist -doc/source/_static/senlin.policy.yaml.sample -etc/senlin/policy.yaml.sample -etc/senlin/senlin.conf.sample -releasenodes/build -senlin-test.db -senlin.sqlite -tags -*~ -*.eggs -*.egg-info -*.iml -*.log -*.pyc -*.swp -*.swo -.coverage -.coverage.* -.idea -.project -.pydevproject -.tox -.venv -.DS_Store -.stestr diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index 92f7ddfb9..000000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=${OS_TEST_PATH:-./senlin/tests/unit} -top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index 458a626d8..000000000 --- a/.zuul.yaml +++ /dev/null @@ -1,180 +0,0 @@ -- project: - queue: senlin - templates: - - check-requirements - - openstack-python3-jobs - - publish-openstack-docs-pti - - release-notes-jobs-python3 - check: - jobs: - - senlin-dsvm-tempest-py3-api - - senlin-dsvm-tempest-py3-api-sqlalchemy-2x - - senlin-tempest-api-ipv6-only - - senlin-dsvm-tempest-py3-functional - - senlin-dsvm-tempest-py3-functional-sqlalchemy-2x - - senlin-dsvm-tempest-py3-integration - - senlin-dsvm-tempest-py3-integration-zaqar: - voting: false - - openstack-tox-cover: - voting: false - gate: - jobs: - - senlin-dsvm-tempest-py3-api - - senlin-tempest-api-ipv6-only - - senlin-dsvm-tempest-py3-functional - -- job: - name: 
senlin-tempest-base - parent: devstack-tempest - description: Senlin Devstack tempest base job - timeout: 7800 - required-projects: &base_required_projects - - openstack/senlin - - openstack/senlin-tempest-plugin - irrelevant-files: &base_irrelevant_files - - ^.*\.rst$ - - ^api-ref/.*$ - - ^doc/.*$ - - ^releasenotes/.*$ - vars: &base_vars - tox_envlist: all - devstack_services: - tempest: true - devstack_plugins: - senlin: https://opendev.org/openstack/senlin - devstack_localrc: - TEMPEST_PLUGINS: '/opt/stack/senlin-tempest-plugin' - USE_PYTHON3: true - devstack_local_conf: - test-config: - $TEMPEST_CONFIG: - clustering: - min_microversion: 1.12 - max_microversion: 1.12 - delete_with_dependency: True - health_policy_version: '1.1' - -- job: - name: senlin-dsvm-tempest-py3-api - parent: senlin-tempest-base - vars: - tempest_test_regex: senlin_tempest_plugin.tests.api - devstack_localrc: - USE_PYTHON3: true - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test - -- job: - name: senlin-dsvm-tempest-py3-api-sqlalchemy-2x - parent: senlin-tempest-base - required-projects: - - name: openstack/oslo.db - vars: - tempest_test_regex: senlin_tempest_plugin.tests.api - devstack_localrc: - USE_PYTHON3: true - USE_SQLALCHEMY_LATEST: true - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test - -- job: - name: senlin-dsvm-tempest-py3-functional - parent: senlin-tempest-base - vars: - tempest_test_regex: senlin_tempest_plugin.tests.functional - devstack_localrc: - USE_PYTHON3: true - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test - health_check_interval_min: 10 - -- job: - name: senlin-dsvm-tempest-py3-functional-sqlalchemy-2x - parent: senlin-tempest-base - required-projects: - - name: openstack/oslo.db - vars: - tempest_test_regex: senlin_tempest_plugin.tests.functional - devstack_localrc: - USE_PYTHON3: true - USE_SQLALCHEMY_LATEST: true - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test - health_check_interval_min: 10 - -- job: - name: senlin-dsvm-tempest-py3-integration - parent: senlin-tempest-base - vars: - tempest_test_regex: senlin_tempest_plugin.tests.integration(?!\.test_nova_server_cluster.TestNovaServerCluster).* - devstack_plugins: - heat: https://opendev.org/openstack/heat - devstack_localrc: - USE_PYTHON3: true - TEMPEST_PLUGINS: '"/opt/stack/senlin-tempest-plugin /opt/stack/zaqar-tempest-plugin"' - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - health_check_interval_min: 10 - required-projects: - - openstack/heat - - openstack/octavia - - openstack/python-zaqarclient - - openstack/senlin - - openstack/senlin-tempest-plugin - - openstack/zaqar-tempest-plugin - -- job: - name: senlin-dsvm-tempest-py3-integration-zaqar - parent: senlin-tempest-base - vars: - tempest_test_regex: senlin_tempest_plugin.tests.integration.test_nova_server_cluster.TestNovaServerCluster - devstack_plugins: - zaqar: https://opendev.org/openstack/zaqar - heat: https://opendev.org/openstack/heat - devstack_localrc: - USE_PYTHON3: true - TEMPEST_PLUGINS: '"/opt/stack/senlin-tempest-plugin /opt/stack/zaqar-tempest-plugin"' - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - health_check_interval_min: 10 - required-projects: - - openstack/heat - - openstack/python-zaqarclient - - openstack/senlin - - openstack/senlin-tempest-plugin - - openstack/zaqar - - openstack/zaqar-ui - - openstack/zaqar-tempest-plugin - -- 
job: - name: senlin-tempest-api-ipv6-only - parent: devstack-tempest-ipv6 - description: | - Senlin devstack tempest tests job for IPv6-only deployment - irrelevant-files: *base_irrelevant_files - required-projects: *base_required_projects - timeout: 7800 - vars: - <<: *base_vars - tempest_test_regex: senlin_tempest_plugin.tests.api - devstack_local_conf: - post-config: - $SENLIN_CONF: - DEFAULT: - cloud_backend: openstack_test diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 3925fd52f..000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,91 +0,0 @@ -Before You Start -================ - -If you would like to contribute to the development of OpenStack, -you must follow the steps on this page: - - https://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - - -Where to Start -============== - -There are many ways to start your contribution. - -Sign on a bug to fix -------------------- - -Bugs related to senlin are reported and tracked on the individual sites on -Launchpad: - -- Senlin Server: https://bugs.launchpad.net/senlin -- Senlin Client: https://bugs.launchpad.net/python-senlinclient -- Senlin Dashboard: https://bugs.launchpad.net/senlin-dashboard - -You can pick any bug item that has not been assigned to work on. Each bug fix -patch should be accompanied by a release note. - - -Pick a TODO item ---------------- - -The Senlin team maintains a ``TODO.rst`` file under the root directory, where you -can add new items, claim existing items and remove items that are completed. -You may want to check if there are items you can pick up. After picking one, the -expected workflow is: - -#. Propose a patch to remove the item from the ``TODO.rst`` file. -#. Add an item to the `etherpad page`_ which the core team uses to track the - progress of individual work items. -#. Start working on the item and keep updating your progress on the `etherpad - page`_, e.g. paste the patch review link to the page. -#. Mark the item on the `etherpad page`_ as completed when the patches are - all merged. - - -Start a Bigger Effort --------------------- - -The Senlin team also maintains a ``FEATURES.rst`` file under the root directory, -where you can add new items by proposing a patch to the file or claim an item -to work on. However, the work items in the ``FEATURES.rst`` file are all -non-trivial, thus demand a deeper discussion before being worked on. The -expected workflow for these items is: - -#. Propose a spec file to the ``doc/specs`` directory describing the detailed - design and other options, if any. -#. Work with the reviewers to polish the design until it is accepted. -#. Propose blueprint(s) to track the progress of the work item by registering - them at the `blueprint page`_. -#. Start working on the blueprints and checking in patches. Each patch should - have a ``partial-blueprint: <blueprint-name>`` tag in its commit message (see - the example after this list). -#. For each blueprint, add an item to the `etherpad page`_ so that it can be - closely tracked in weekly meetings. -#. Mark the blueprint(s) as completed when all related patches are merged. -#. Propose a patch to the ``FEATURES.rst`` file to remove the work item. -#. Propose a separate release note patch for the new feature. 
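For illustration only, a commit message following this workflow might look like the sketch below; the blueprint name is hypothetical, not a registered blueprint::

    Add skeleton for the scavenger daemon

    Add the initial skeleton of the scavenger process that cleanses the
    database of expired event records.

    partial-blueprint: scavenger-process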
- - -Reporting Bugs -============== - -Bugs should be filed on the Launchpad sites: - -- Senlin Server: https://bugs.launchpad.net/senlin -- Senlin Client: https://bugs.launchpad.net/python-senlinclient -- Senlin Dashboard: https://bugs.launchpad.net/senlin-dashboard - - -Meet the Developers -=================== - -Real-time communication among developers is mostly done via IRC. -The team is using the #senlin channel on oftc.net. - -.. _`etherpad page`: https://etherpad.openstack.org/p/senlin-newton-workitems -.. _`blueprint page`: https://blueprints.launchpad.net/senlin diff --git a/FEATURES.rst b/FEATURES.rst deleted file mode 100644 index d94331d05..000000000 --- a/FEATURES.rst +++ /dev/null @@ -1,284 +0,0 @@ -Senlin Feature Request Pipeline -=============================== - -This document records the feature requests the developer team has received and -considered. This document SHOULD NOT be treated as a replacement of the -blueprints (or specs) which are already accompanied by a design. The feature -requests here are meant to be a pipeline for mid-term goals that Senlin should -strive to achieve. Whenever a feature can be implemented with a practical -design, the feature should be moved to a blueprint (and/or specs) review. - -This document SHOULD NOT be treated as a replacement of the `TODO` file the -development team is maintaining. The `TODO` file records actionable work items -that can be picked up by any developer who is willing to do it, while this -document records more general requirements that need at least a draft design -before being worked on. - - -High Priority -~~~~~~~~~~~~~ - -TOSCA support ------------- - -Provide TOSCA support in Senlin (maybe reuse heat-translator/tosca-parser?) - - -Advanced Container Clustering ----------------------------- - -Container cluster management: - -- Scheduling -- Networking/Storage -- APIs/Operations -- Security issues -- Dependencies - - -Better Versioning for Profile/Policy ------------------------------------ - -Profile/Policy schemas may change over time as properties are added or -deprecated. Versioning support is important for keeping backward -compatibility when profiles/policies evolve. - - -Role-specific Profiles ---------------------- - -There is a need for nodes of the same role to share a common profile while -nodes of different roles use different profiles. The pre-condition for this -is that the profile-types match. - - -Scavenger Process ----------------- - -Senlin needs a scavenger process that runs as a background daemon. It is -tasked with cleansing the database of old data, e.g. event records. Its behavior -must be customizable because users may want the old records to be removed or -to be archived in a certain way. - - -Fault Tolerance --------------- - -Senlin in most cases will be managing clusters with nodes distributed -somewhere. Problems inherent in such a distributed architecture include -partial failures, communication latencies, concurrency and consistency. -Hardware/software failures are to be expected. Senlin must remain operational in the -face of such failures. - - -Scaling to Existing Nodes ------------------------- - -[Conclusion from Austin: https://etherpad.openstack.org/p/newton-senlin-as] - -Senlin can improve the scale-out operation so that it can add existing nodes to -a cluster when doing scale-out. We do not intend to scale to nodes not -created by Senlin. - - -Adoption of Nodes ----------------- - -There have been requirements for adopting existing resources (e.g. 
nova -servers) to be managed by Senlin. - - -Middle Priority -~~~~~~~~~~~~~~~ - -Access Control -------------- - -Currently, all access to Senlin objects like clusters and profiles is project_safe -by default. This prevents users from manipulating resources that belong to other -users. However, sharing resources between different users/projects with limited -privileges (e.g. read-only, read-write) is also a very reasonable demand in many -cases. Therefore, we may need to provide access permission control in Senlin to -support this kind of requirement. - - -Blue-Green Deployment --------------------- - -Support deploying environments using the blue-green deployment pattern. -http://martinfowler.com/bliki/BlueGreenDeployment.html - - -Multi-cloud Support ------------------- - -In some cases, users may need to create/scale clusters across different -clouds. Therefore, Senlin should be able to manage nodes that -span multiple clouds within the same cluster. Support from both the profile -and policy layers is necessary for providing this ability. - - -Customizable Batch Processing ----------------------------- - -An important non-functional requirement for Senlin is the scale of clusters it -can handle. We will strive to make it handle large-scale ones; however, that -means we need to improve DB access under heavy load. One -potential tradeoff is to introduce an option for users to customize the size -of batches when a large number of DB requests pour in. - - -Support to Bare-metal --------------------- - -Managing bare-metal clusters is a very common user requirement. It is -reasonable for Senlin to support this by talking to a service like Ironic. - - -Improve health schedule ------------------------ -The scheduling of which engine handles which cluster's health registries can be -improved. For example: 1. When the first engine starts, it runs all health -registries. 2. When another engine starts, it can send a broadcast -message that carries its handling capacity and requests to take over -some of the health registries. - - -Host Fencing Support -------------------- -To ensure a seemingly dead node is actually dead, all HA solutions need a way -to kill a node for sure. Senlin is no exception here. We already have support for -force-deleting a VM instance. What is still needed is a mechanism to kill a failed host. - - -LB HealthMonitor based failure detection ---------------------------------------- -Ideally, Senlin could rely on the LBaaS service for node failure detection -rather than reinventing the wheel. However, LBaaS (Octavia) has not fixed the -obvious bug. -Another option is to have LBaaS emit events when node failures are detected. -This proposal has failed to find its way into the upstream. -When the upstream project (Octavia) has such features, we can enable them from -the Senlin side. - - -Low Priority -~~~~~~~~~~~~ - -User Defined Actions -------------------- - -Actions in Senlin are mostly built-in ones at present. There are requirements -to incorporate shell scripts and/or other structured software configuration -tools into the whole picture. One option is to provide an easy way for -Senlin to work with Ansible, for example. - - -Use Barbican to Store Secrets ----------------------------- - -Currently, Senlin uses the `cryptography` package for data encryption and -decryption. There should be support for users to store credentials using the -Barbican service, in addition to the current solution.
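To make the Barbican item above concrete, a minimal sketch of storing and retrieving a credential through python-barbicanclient could look like the following; the endpoint, account values and secret name are illustrative assumptions, and this is not existing Senlin code::

    # Minimal sketch: keep a credential in Barbican instead of encrypting
    # it locally. Assumes python-barbicanclient and keystoneauth1 are
    # installed; all names/values below are illustrative.
    from barbicanclient import client as barbican_client
    from keystoneauth1 import session as ks_session
    from keystoneauth1.identity import v3

    auth = v3.Password(auth_url='http://127.0.0.1:5000/v3',
                       username='senlin', password='secret',
                       project_name='service',
                       user_domain_name='Default',
                       project_domain_name='Default')
    barbican = barbican_client.Client(
        session=ks_session.Session(auth=auth))

    # Store the credential as a generic secret; store() returns a ref URL.
    secret = barbican.secrets.create(name='node-credential',
                                     payload='the-credential-blob')
    secret_ref = secret.store()

    # Retrieve it later by reference instead of decrypting a local copy.
    print(barbican.secrets.get(secret_ref).payload)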
- - -Use VPNaaS to Build Cross-Region/Cross-Cloud -------------------------------------------- - -When building clusters that span more than one region or cloud, there are -requirements to place all cluster nodes on the same VPN so that workloads can -be distributed to the nodes as if they sat on the same network. - - -Vertical Scaling ---------------- - -Though Senlin is mainly concerned with horizontal scaling (in/out) support, -there are possibilities/requirements to scale nodes in the vertical direction. -Vertical scaling means automatically adding compute/storage/network resources -to cluster nodes. Depending on the support from the corresponding services, this -could be explored. - - -Replace Green Threads with Python Threading ------------------------------------------- - -Senlin is now using green threads (eventlets) for async executions. The -eventlets execution model does not make efficient use of multi-processor -platforms. Senlin needs a scalable execution engine, so native -multi-threading is needed. - - -Metrics Collection ------------------ - -Senlin needs to support metric collection about the clusters and nodes it -manages. These metrics should be collectible by the ceilometer service, for -example. - - -AWS Compatible API ------------------ - -There are requirements for Senlin to provide an AWS compatible API layer so -that existing workloads can be deployed to Senlin and AWS without needing to -change a lot of code or configurations. - - -Integration with Mistral ------------------------ - -There are cases where the (automated) operations on clusters and nodes form a -workflow. For example, an event triggers some actions to be executed in -sequence and those actions in turn trigger other actions to be executed. - - -Support to Suspend/Resume Operations ------------------------------------- - -A user may want to suspend/resume a cluster or an individual node. Senlin -needs to provide a generic definition of 'suspend' and 'resume'. It needs to -be aware of whether the profile and the driver support such operations. - - -Interaction with Congress ------------------------- - -This is of low priority because Senlin needs a notification mechanism in place -before it can talk to Congress. The reason to interact with Congress is that -there could be enterprise-level policy enforcement that Senlin has to comply -with. - - -Investigation of Tooz --------------------- - -There is a requirement to manage multiple senlin-engine instances in a -distributed way. Or, we can use a variant of DLM to manage cluster membership. -E.g. use redis/zookeeper to build clusters in their sense so that when the -cluster membership changes, we may possibly receive a notification. This would -be helpful for cluster health management. - -Tooz is the promised focal point in this field, generalizing the many backends -that we don't want to care about. This TODO item is about two things: - -#. Does Tooz provide a reliable membership management infrastructure? -#. Is there a comparison between zookeeper and redis, for example? - - -Support to Scheduled Actions ---------------------------- - -This is a request to trigger some actions at a specified time. One typical use -case is to scale up a cluster before a weekend or promotion season in -preparation for the coming burst of workloads. - - -Dynamic Plugin Loading ---------------------- - -Design and implement a dynamic plugin loading mechanism that allows loading -plugins from arbitrary paths.
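-As a rough illustration of what such a mechanism could build upon, Python's
-``importlib`` already supports loading a module from an arbitrary file path;
-the helper and paths below are hypothetical::
-
-    import importlib.util
-
-    def load_plugin(name, path):
-        # Build a module spec from an explicit file location.
-        spec = importlib.util.spec_from_file_location(name, path)
-        module = importlib.util.module_from_spec(spec)
-        # Execute the module so that its plugin classes get defined.
-        spec.loader.exec_module(module)
-        return module
-
-    plugin = load_plugin('my_profile', '/opt/senlin/plugins/my_profile.py')
-
-A real implementation would also have to validate the loaded module against
-the expected profile/policy interface before registering it.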
- - - diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 3b1bbc165..000000000 --- a/HACKING.rst +++ /dev/null @@ -1,57 +0,0 @@ -Senlin Style Commandments -========================= - -- Step 1: Read the OpenStack Style Commandments - https://docs.openstack.org/hacking/latest/ -- Step 2: Read on - -Senlin Specific Commandments ----------------------------- - -- [S318] Use assertion ``assertIsNone(A)`` instead of ``assertEqual(A, None)`` - or ``assertEqual(None, A)``. -- [S319] Use ``jsonutils`` functions rather than using the ``json`` package - directly. -- [S320] Default arguments of a method should not be mutable. -- [S321] The api_version decorator has to be the first decorator on a method. -- [S322] LOG.warn is deprecated. Enforce use of LOG.warning. -- [S323] Use assertTrue(...) rather than assertEqual(True, ...). - -Working on APIs ---------------- - -If you are proposing new APIs or fixes to existing APIs, please spend some -time reading the guidelines published by the API WorkGroup: - -https://opendev.org/openstack/api-sig/src/branch/master/guidelines - -Any work on improving Senlin's APIs to conform to the guidelines is welcome. - -Creating Unit Tests ------------------- - -For every new feature, unit tests should be created that both test and -(implicitly) document the usage of said feature. When submitting a patch to a -bug without a unit test, a new unit test should be added. If a submitted bug -fix does come with a unit test, be sure that the test fails without the -patch and passes with the patch. - -For more information on creating and running unit tests, please read -senlin/doc/source/contributor/testing.rst. Test guide online link: -https://docs.openstack.org/senlin/latest/contributor/testing.html - - -Running Tests ------------- - -The testing system is based on a combination of `tox` and `testr`. The -canonical approach to running tests is to simply run the command `tox`. -This will create virtual environments, populate them with dependencies and -run all of the tests that OpenStack CI systems run. - -Behind the scenes, `tox` is running `ostestr --slowest`, but is set up such -that you can supply any additional arguments to the `ostestr` command. -For example, the following command makes `tox` tell `ostestr` to add -`--analyze-isolation` to its argument list:: - - tox -- --analyze-isolation diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a09..000000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License.
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README.rst b/README.rst index 7b6988033..4ee2c5f13 100644 --- a/README.rst +++ b/README.rst @@ -1,96 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. image:: https://governance.openstack.org/tc/badges/senlin.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. Change things from this point on - -Senlin -====== - --------- -Overview --------- - -Senlin is a clustering service for OpenStack clouds. It creates and operates -clusters of homogeneous objects exposed by other OpenStack services. The goal -is to make the orchestration of collections of similar objects easier. - -Senlin provides RESTful APIs to users so that they can associate various -policies with a cluster. Sample policies include placement policy, load -balancing policy, health policy, scaling policy, update policy and so on. - -Senlin is designed to be capable of managing different types of objects. An -object's lifecycle is managed using profile type implementations, which are -themselves plugins. - ---------- -For Users ---------- - -If you want to install Senlin to try it out, please refer to the documents -under the ``doc/source/user/`` subdirectory.
User guide online link: -https://docs.openstack.org/senlin/latest/#user-references - --------------- -For Developers --------------- - -There are many ways to help improve the software, for example, filing a bug, -submitting or reviewing a patch, writing or reviewing some documents. There -are documents under the ``doc/source/contributor`` subdirectory. Developer -guide online link: https://docs.openstack.org/senlin/latest/#developer-s-guide - ---------- -Resources ---------- - -Launchpad Projects ------------------- -- Server: https://launchpad.net/senlin -- Client: https://launchpad.net/python-senlinclient -- Dashboard: https://launchpad.net/senlin-dashboard -- Tempest Plugin: https://launchpad.net/senlin-tempest-plugin - -Code Repository ---------------- -- Server: https://opendev.org/openstack/senlin -- Client: https://opendev.org/openstack/python-senlinclient -- Dashboard: https://opendev.org/openstack/senlin-dashboard -- Tempest Plugin: https://opendev.org/openstack/senlin-tempest-plugin - -Blueprints ----------- -- Blueprints: https://blueprints.launchpad.net/senlin - -Bug Tracking ------------- -- Server Bugs: https://bugs.launchpad.net/senlin -- Client Bugs: https://bugs.launchpad.net/python-senlinclient -- Dashboard Bugs: https://bugs.launchpad.net/senlin-dashboard -- Tempest Plugin Bugs: https://bugs.launchpad.net/senlin-tempest-plugin - -Weekly Meetings ---------------- -- Schedule: every Tuesday at 1300 UTC, on #openstack-meeting channel -- Agenda: https://wiki.openstack.org/wiki/Meetings/SenlinAgenda -- Archive: http://eavesdrop.openstack.org/meetings/senlin/2015/ - -IRC --- -IRC Channel: #senlin on `OFTC`_. - -Mailing List ------------- -The project uses http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss -as its mailing list. Please use the tag ``[Senlin]`` in the subject for new -threads. - - -.. _OFTC: https://oftc.net/ - -Release notes ------------------- -- Release notes: https://docs.openstack.org/releasenotes/senlin/ +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. diff --git a/TODO.rst b/TODO.rst deleted file mode 100644 index 57ae80d7b..000000000 --- a/TODO.rst +++ /dev/null @@ -1,98 +0,0 @@ -Senlin TODO Item List -===================== -This document records all work items the team wants to finish in the short -term (usually a development cycle, which lasts 6 months). All jobs listed here -are NOT in progress, which means developers can pick up any work item they are -interested in if they have enough time to work on it. Developers should file -a blueprint on Launchpad giving a detailed description of their plan after -deciding to work on a specific item. A patch should be proposed as well to remove -the related work item from the TODO list after the blueprint is approved. - - -HIGH PRIORITY -============= - -API ---- - - Find and fill gaps with API-WG besides the ones we already identified. - - - Add support to put a cluster into maintenance mode - -ENGINE ------- - - Complete support for the list of health recovery actions. - - - Add command "node adopt --profile-type --properties network.id=\ --resource " to adopt an existing server node. - * The new command should check if the provided properties are - sufficient (see the sketch below). - * There exists a need to snapshot a server before adoption.
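-A minimal sketch of the property-sufficiency check mentioned above (purely
-illustrative; the required key set and helper name are assumptions, and the
-real validation would live in the profile's schema)::
-
-    REQUIRED_ADOPT_PROPS = {'network.id'}  # hypothetical requirement set
-
-    def check_adopt_properties(props):
-        """Verify that user-supplied properties cover all required keys."""
-        missing = REQUIRED_ADOPT_PROPS - set(props)
-        if missing:
-            raise ValueError('missing properties: %s'
-                             % ', '.join(sorted(missing)))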
- - -MIDDLE PRIORITY =============== - -API ---- - - Support advanced filters as suggested by the API WG: - `Filtering Guidelines`_ - -ENGINE ------- - - Add a new property "fast_scaling" to Cluster - * A standby (user invisible) cluster is created containing the extra nodes - that amount to max_size - desired_capacity - - Perform cluster scaling based on role filters - - Perform cluster checking based on role filters - - Perform cluster recovery based on role filters - -PROFILE ------- - - Add support for snapshot/restore operations to the nova server profile. The - possible use case is rapid scaling. - - Add support to nova server so that "block_device_mapping_v2" can reference - an existing pool of cinder volumes. - - Add support to nova server so that "network" can reference an existing - pool of neutron ports or fixed IPs. - -POLICY ------- - - Provide support for watching all objects we created on behalf of users, like - the load balancer that is created when attaching an lb policy. - - Leverage other monitoring services for object health status monitoring. - - Health policy extension for recovery action selection based on inputs - -CLIENT ------- - - Provide role-based filtering when doing 'cluster-run' - -LOW PRIORITY -============ - -ENGINE ------- - - Allow actions to be paused and resumed. This is important for some background - actions such as health checking. - - Provide support for oslo.notification and allow nodes to receive and react - to those notifications accordingly: `Autoscaling Notifications`_ - -PROFILE ------- - - Support disk property update for the os.nova.server profile - -DOC --- - - Provide a sample conf file for customizing senlin options. - -TEST ---- - - Add more Rally profile and scenario support for Senlin. - -OTHERS ------ - - Integration with Glare for profile/policy specs storage. At least we may - want to enable users to retrieve/reference heat templates from glare when - creating profiles. - - -.. _`Filtering Guidelines`: https://specs.openstack.org/openstack/api-wg/guidelines/pagination_filter_sort.html#filtering -.. _`Autoscaling Notifications`: https://ask.openstack.org/en/question/46495/heat-autoscaling-adaptation-actions-on-existing-servers/ diff --git a/api-ref/source/actions.inc b/api-ref/source/actions.inc deleted file mode 100644 index 072e262b5..000000000 --- a/api-ref/source/actions.inc +++ /dev/null @@ -1,188 +0,0 @@ -======= -Actions -======= - -Lists all actions and shows details for an action. - - -List actions -============ - -.. rest_method:: GET /v1/actions - -Lists all actions. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - name: name_query - - target: target_query - - action: action_action_query - - status: action_status_query - -The sorting keys include ``name``, ``target``, ``action``, ``created_at`` -and ``status``. - -Response Parameters ------------------- - -..
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - actions: actions - - action: action_action - - cause: cause - - created_at: created_at - - data: action_data - - depended_by: depended_by - - depends_on: depends_on - - start_time: start_time - - end_time: end_time - - id: action_id - - inputs: inputs - - interval: interval - - name: name - - outputs: outputs - - owner: action_owner - - project: project - - status: action_status - - status_reason: status_reason - - target: action_target - - timeout: action_timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/actions-list-response.json - :language: javascript - - -Show action details -=================== - -.. rest_method:: GET /v1/actions/{action_id} - -Shows details for an action. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - action_id: action_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - action: action_action - - cause: cause - - created_at: created_at - - data: action_data - - depended_by: depended_by - - depends_on: depends_on - - start_time: start_time - - end_time: end_time - - id: action_id - - inputs: inputs - - interval: interval - - name: name - - outputs: outputs - - owner: action_owner - - project: project - - status: action_status - - status_reason: status_reason - - target: action_target - - timeout: action_timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/action-get-response.json - :language: javascript - -Update action ============= - -.. rest_method:: PATCH /v1/actions/{action_id} - - min_version: 1.12 - -Updates the status of an action. - -This API is only available since API microversion 1.12. - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - action_id: action_id_url - - action: action - - status: action_status_update - - force: action_update_force_query - -Request Example ---------------- - -.. literalinclude:: samples/action-get-request.json - :language: javascript - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 diff --git a/api-ref/source/build_info.inc b/api-ref/source/build_info.inc deleted file mode 100644 index fc3c649e6..000000000 --- a/api-ref/source/build_info.inc +++ /dev/null @@ -1,50 +0,0 @@ -============================== -Build information (build-info) -============================== - -Shows build information for a Senlin deployment. - -Show build information ======================= - -.. rest_method:: GET /v1/build-info - -Shows build information for a Senlin deployment. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - -Response Parameters ------------------- - -..
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - build_info: build_info - - api: build_info_api - - engine: build_info_engine - -Response Example ----------------- - -.. literalinclude:: samples/build-show-response.json - :language: javascript - -This operation does not accept a request body. diff --git a/api-ref/source/cluster_policies.inc b/api-ref/source/cluster_policies.inc deleted file mode 100644 index 9ec738138..000000000 --- a/api-ref/source/cluster_policies.inc +++ /dev/null @@ -1,113 +0,0 @@ -=================================== -Cluster Policies (cluster-policies) -=================================== - -Lists all cluster policies and shows information for a cluster policy. - -List all cluster policies ========================= - -.. rest_method:: GET /v1/clusters/{cluster_id}/policies - -Lists all policies attached to a specific cluster. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - enabled: enabled_query - - policy_name: name_query - - policy_type: type_query - - sort: sort - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - cluster_policies: cluster_policies - - cluster_id: cluster_id - - cluster_name: cluster_name - - enabled: cluster_policy_enabled - - id: cluster_policy_id - - policy_id: policy_id - - policy_name: policy_name - - policy_type: policy_type_name - -Response Example ----------------- - -.. literalinclude:: samples/cluster-policies-list-response.json - :language: javascript - - -Show cluster_policy details =========================== - -.. rest_method:: GET /v1/clusters/{cluster_id}/policies/{policy_id} - -Shows details for a cluster policy. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - policy_id: policy_id_url - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - cluster_policy: cluster_policy - - cluster_id: cluster_id - - cluster_name: cluster_name - - enabled: cluster_policy_enabled - - id: cluster_policy_id - - policy_id: policy_id - - policy_name: policy_name - - policy_type: policy_type_name - -Response Example ----------------- - -.. literalinclude:: samples/cluster-policy-show-response.json - :language: javascript diff --git a/api-ref/source/clusters.inc b/api-ref/source/clusters.inc deleted file mode 100644 index 3fa2a46f9..000000000 --- a/api-ref/source/clusters.inc +++ /dev/null @@ -1,1158 +0,0 @@ -======== -Clusters -======== - -Lists all clusters and creates, shows information for, updates, deletes, and -triggers an action on a cluster. - - -List clusters -============= - -.. rest_method:: GET /v1/clusters - -Lists clusters. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -..
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - name: name_query - - status: status_query - -The sorting keys include ``name``, ``status``, ``init_at``, ``created_at`` -and ``updated_at``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - clusters: clusters - - created_at: created_at - - config: cluster_config - - data: cluster_data - - dependents: dependents - - desired_capacity: desired_capacity - - domain: domain - - id: cluster_id - - init_at: init_at - - max_size: max_size - - metadata: metadata - - min_size: min_size - - name: name - - nodes: cluster_nodes - - policies: cluster_policies_property - - profile_id: profile_id - - profile_name: profile_name - - project: project - - status: cluster_status - - status_reason: status_reason - - timeout: timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/clusters-list-response.json - :language: javascript - - -Create cluster -============== - -.. rest_method:: POST /v1/clusters - -Creates a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 500 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - config: cluster_config_req - - cluster: cluster - - name: cluster_name - - desired_capacity: desired_capacity - - profile_id: profile_identity_req - - min_size: min_size_req - - timeout: timeout_req - - max_size: max_size_req - - metadata: metadata_req - -Request Example ---------------- - -.. literalinclude:: samples/cluster-create-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - cluster: cluster - - config: cluster_config - - created_at: created_at - - data: cluster_data - - dependents: dependents - - desired_capacity: desired_capacity - - domain: domain - - id: cluster_id - - init_at: init_at - - max_size: max_size - - metadata: metadata - - min_size: min_size - - name: name - - nodes: cluster_nodes - - policies: cluster_policies_property - - profile_id: profile_id - - profile_name: profile_name - - project: project - - status: cluster_status - - status_reason: status_reason - - timeout: timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/cluster-create-response.json - :language: javascript - - -Show cluster details -==================== - -.. rest_method:: GET /v1/clusters/{cluster_id} - -Shows details for a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - cluster: cluster - - config: cluster_config - - created_at: created_at - - data: cluster_data - - dependents: dependents - - desired_capacity: desired_capacity - - domain: domain - - id: cluster_id - - init_at: init_at - - max_size: max_size - - metadata: metadata - - min_size: min_size - - name: name - - nodes: cluster_nodes - - policies: cluster_policies_property - - profile_id: profile_id - - profile_name: profile_name - - project: project - - status: cluster_status - - status_reason: status_reason - - timeout: timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/cluster-show-response.json - :language: javascript - - -Update cluster -============== - -.. rest_method:: PATCH /v1/clusters/{cluster_id} - -Updates a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - cluster: cluster - - config: cluster_config_req - - name: name_req - - profile_id: profile_identity - - timeout: timeout_req - - metadata: metadata_req - - profile_only: profile_only - -Request Example ---------------- - -.. literalinclude:: samples/cluster-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - cluster: cluster - - config: cluster_config - - created_at: created_at - - data: cluster_data - - dependents: dependents - - desired_capacity: desired_capacity - - domain: domain - - id: cluster_id - - init_at: init_at - - max_size: max_size - - metadata: metadata - - min_size: min_size - - name: name - - nodes: cluster_nodes - - policies: cluster_policies_property - - profile_id: profile_id - - profile_name: profile_name - - project: project - - status: cluster_status - - status_reason: status_reason - - timeout: timeout - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/cluster-update-response.json - :language: javascript - - -Delete cluster -============== - -.. rest_method:: DELETE /v1/clusters/{cluster_id} - -Deletes a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - -Resize a Cluster -================ - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Resize a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - adjustment_type: adjustment_type - - number: adjustment_number - - min_size: adjustment_min_size - - max_size: adjustment_max_size - - min_step: adjustment_min_step - - strict: adjustment_strict - - -The ``action_name`` in the request body has to be ``resize``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-resize-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Scale-in a Cluster -=================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Shrink the size of a cluster by a given number. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - count: scale_count - - -The ``action_name`` in the request body has to be ``scale_in``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-scale-in-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Scale-out a Cluster -=================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Expand the size of a cluster by a given number. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - count: scale_count - - -The ``action_name`` in the request body has to be ``scale_out``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-scale-out-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Add nodes to a Cluster -====================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Add the specified list of nodes to the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - nodes: cluster_member_nodes - - -The ``action_name`` in the request body has to be ``add_nodes``. - -Request Example ---------------- - -.. 
literalinclude:: samples/cluster-add-nodes-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Remove nodes from a Cluster -=========================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Remove the specified list of nodes from the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - nodes: cluster_member_nodes - - destroy_after_deletion: destroy_after_deletion - - -The ``action_name`` in the request body has to be ``del_nodes``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-del-nodes-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Replace nodes in a Cluster -=========================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Replace the specified nodes in a cluster. - -This API is only available since API microversion 1.3. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - nodes: cluster_replace_nodes - - -The ``action_name`` in the request body has to be ``replace_nodes``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-replace-nodes-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Attach a Policy to a Cluster -============================ - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Attach the specified policy to the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - policy_id: policy_identity - - enabled: cluster_policy_enabled - -The ``action_name`` in the request body has to be ``policy_attach``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-attach-policy-request.json - :language: javascript - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Detach a Policy from a Cluster -============================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Detach the specified policy from the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - policy_id: policy_identity - -The ``action_name`` in the request body has to be ``policy_detach``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-detach-policy-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Update a Policy on a Cluster -============================ - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Update the specified policy on the cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - policy_id: policy_identity - - enabled: cluster_policy_enabled - -The ``action_name`` in the request body has to be ``update_policy``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-update-policy-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Collect Attributes Across a Cluster -=================================== - -.. rest_method:: GET /v1/clusters/{cluster_id}/attrs/{path} - -Aggregate an attribute value across all nodes in a cluster. - -This API is only available since API microversion 1.2. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - path: path_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - cluster_attributes: cluster_attributes - - id: node_id - - value: attr_value - -Check a Cluster's Health Status -=============================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Check the health status of all nodes in a cluster. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. 
rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - params: check_params - -The ``action_name`` in the request body has to be ``check``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-check-request.json - :language: javascript - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Recover a Cluster to a Healthy Status ===================================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Recover the health status for all nodes in a cluster. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - params: recover_params - -The ``action_name`` in the body must be ``recover``. The valid parameters -include: - -- ``operation``: A string specifying the action to be performed for node - recovery. - -- ``operation_params``: An optional dictionary specifying the key-value - arguments for the specific node recovery action. - -- ``check``: A boolean specifying whether the engine should check the actual - statuses of cluster nodes before performing the recovery action. This - parameter was added in microversion 1.6 and defaults to False. - -- ``check_capacity``: A boolean specifying whether to compare the current - number of nodes against the ``desired_capacity`` field. Nodes will be - deleted if the number of nodes is larger than ``desired_capacity``; - otherwise, nodes will be created. This parameter was added in microversion - 1.7 and defaults to False. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-recover-request.json - :language: javascript - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Perform an Operation on a Cluster ================================= - -.. rest_method:: POST /v1/clusters/{cluster_id}/ops - -Perform an operation on the specified cluster. The specified operation and its -associated parameters must validate against the profile type of the cluster. - -This API is only available since API microversion 1.4. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - operation: cluster_operation_request - -Request Example ---------------- - -.. literalinclude:: samples/cluster-operation-request.json - :language: javascript - -Response Parameters ------------------- - -..
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript - - -Complete a Lifecycle Action on a Cluster ======================================== - -.. rest_method:: POST /v1/clusters/{cluster_id}/actions - -Complete a lifecycle action and trigger deletion of nodes. - -This API is only available since API microversion 1.9. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - cluster_id: cluster_id_url - - action: action_request - - lifecycle_action_token: lifecycle_token_id - -The ``action_name`` in the body must be ``complete_lifecycle``. - -Request Example ---------------- - -.. literalinclude:: samples/cluster-complete-lifecycle-request.json - :language: javascript - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/cluster-action-response.json - :language: javascript diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py deleted file mode 100644 index 4cf19aaba..000000000 --- a/api-ref/source/conf.py +++ /dev/null @@ -1,205 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# senlin documentation build configuration file, created by -# sphinx-quickstart on Sat May 1 15:17:47 2010. -# -# This file is execfile()d with the current directory set to -# its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - -extensions = [ - 'os_api_ref', - 'openstackdocstheme', -] - -html_theme = 'openstackdocs' -html_theme_options = { - "sidebar_mode": "toc", -} - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project.
-copyright = u'2015-present, OpenStack Foundation' - -# openstackdocstheme options -repository_name = 'openstack/senlin' -bug_project = 'senlin' -bug_tag = 'api-ref' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# The reST default role (used for this markup: `text`) to use -# for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for man page output ---------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. 
The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'senlindoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', 'Senlin.tex', u'OpenStack Clustering API Documentation', - u'OpenStack Foundation', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True diff --git a/api-ref/source/events.inc b/api-ref/source/events.inc deleted file mode 100644 index e2b9a7d65..000000000 --- a/api-ref/source/events.inc +++ /dev/null @@ -1,129 +0,0 @@ -=============== -Events (events) -=============== - -Lists all events and shows information for an event. - -List events -=========== - -.. rest_method:: GET /v1/events - -Lists all events. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - level: event_level_req - - marker: marker - - sort: sort - - global_project: global_project - - oid: oid_query - - otype: otype_query - - oname: oname_query - - cluster_id: cluster_identity_query - - action: action_name_query - -The sorting keys include ``timestamp``, ``level``, ``otype``, ``oname``, -``action``, ``status``, ``oid`` and ``cluster_id``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - events: events - - action: action_name - - cluster_id: cluster_id - - id: event_id - - level: event_level - - oid: oid - - oname: oname - - otype: otype - - project: project - - status: event_status - - status_reason: status_reason - - timestamp: event_timestamp - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/events-list-response.json - :language: javascript - - -Shows event details -=================== - -.. rest_method:: GET /v1/events/{event_id} - -Shows details for an event. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - event_id: event_id_url - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - event: event - - action: action_name - - cluster_id: cluster_id - - id: event_id - - level: event_level - - oid: oid - - oname: oname - - otype: otype - - project: project - - status: event_status - - status_reason: status_reason - - timestamp: event_timestamp - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/event-show-response.json - :language: javascript diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst deleted file mode 100644 index 09da4260f..000000000 --- a/api-ref/source/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -:tocdepth: 2 - -============== -Clustering API -============== - -.. rest_expand_all:: - -.. include:: versions.inc -.. include:: build_info.inc -.. include:: profile_types.inc -.. include:: profiles.inc -.. include:: policy_types.inc -.. include:: policies.inc -.. include:: clusters.inc -.. include:: cluster_policies.inc -.. include:: nodes.inc -.. include:: receivers.inc -.. include:: events.inc -.. include:: webhooks.inc -.. include:: actions.inc -.. include:: services.inc diff --git a/api-ref/source/nodes.inc b/api-ref/source/nodes.inc deleted file mode 100644 index 1e5a03865..000000000 --- a/api-ref/source/nodes.inc +++ /dev/null @@ -1,654 +0,0 @@ -===== -Nodes -===== - -Lists all nodes, and creates, shows information for, updates, deletes a node. - - -List nodes -========== - -.. rest_method:: GET /v1/nodes - -Lists all nodes. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - cluster_id: cluster_identity_query - - name: name_query - - status: status_query - -The sorting keys include ``name``, ``index``, ``status``, ``init_at``, -``created_at`` and ``updated_at``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - nodes: nodes - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - dependents: dependents - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-list-response.json - :language: javascript - - -Create node -=========== - -.. rest_method:: POST /v1/nodes - -Creates a node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - node: node - - role: role_req - - profile_id: profile_identity_req - - cluster_id: node_cluster_identity - - name: node_name - - metadata: metadata_req - -Request Example ---------------- - -.. literalinclude:: samples/node-create-request.json - :language: javascript - -Response Parameters -------------------- - -.. 
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - node: node - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - dependents: dependents - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-create-response.json - :language: javascript - - -Adopt node -========== - -.. rest_method:: POST /v1/nodes/adopt - - min_version: 1.7 - -Adopts a node. - -This API is only available since API microversion 1.7. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - identity: identity - - metadata: metadata_req - - name: node_name_adopt - - overrides: overrides - - role: role_req - - snapshot: snapshot - - type: profile_type_name - -Request Example ---------------- - -.. literalinclude:: samples/node-adopt-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - node: node - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-adopt-response.json - :language: javascript - - -Adopt node (preview) -==================== - -.. rest_method:: POST /v1/nodes/adopt-preview - - min_version: 1.7 - -Preview a node adoption. - -This API is only available since API microversion 1.7. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - identity: identity - - overrides: overrides - - snapshot: snapshot - - type: profile_type_name - -Request Example ---------------- - -.. literalinclude:: samples/node-adopt-preview-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - node_preview: node_preview - - cluster_id: cluster_id - - type: profile_type_name - - version: profile_type_version - - properties: profile_spec - -Response Example ----------------- - -.. literalinclude:: samples/node-adopt-preview-response.json - :language: javascript - - -Show node details -================= - -.. rest_method:: GET /v1/nodes/{node_id} - -Shows details about a node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. 
rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - node_id: node_id_url - - show_details: show_details - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - node: node - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - dependents: dependents - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-show-response.json - :language: javascript - - -Update node -=========== - -.. rest_method:: PATCH /v1/nodes/{node_id} - -Updates a node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - node_id: node_id_url - - node: node - - name: name_req - - profile_id: profile_identity - - role: role_req - - metadata: metadata_req - - tainted: tainted_req - -Request Example ---------------- - -.. literalinclude:: samples/node-update-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - node: node - - cluster_id: cluster_id - - created_at: created_at - - data: node_data - - dependents: dependents - - domain: domain - - id: node_id - - index: index - - init_at: init_at - - metadata: metadata - - name: name - - physical_id: physical_id - - profile_id: profile_id - - profile_name: profile_name - - project: project - - role: role - - status: node_status - - status_reason: status_reason - - tainted: tainted - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/node-show-response.json - :language: javascript - - -Delete node -=========== - -.. rest_method:: DELETE /v1/nodes/{node_id} - -Deletes a node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - node_id: node_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - -Check a Node's Health -===================== - -.. rest_method:: POST /v1/nodes/{node_id}/actions - -Check the health status of the specified node. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - node_id: node_id_url - - action: action_request - -The ``action_name`` in the body must be ``check``. - -Request Example ---------------- - -.. 
literalinclude:: samples/node-check-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/node-action-response.json - :language: javascript - - - -Recover a Node to Healthy Status -================================ - -.. rest_method:: POST /v1/nodes/{node_id}/actions - -Recover the specified node to its healthy status. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - node_id: node_id_url - - action: action_request - -The ``action_name`` in the body must be ``recover``. The valid parameters -include: - -- ``operation``: A string specifying the action to be performed for node - recovery. - -- ``operation_params``: An optional dictionary specifying the key-value - arguments for the specific node recovery action. - -- ``check``: A boolean specifying whether the engine should check the node's - actual status before performing the recovery action. This parameter is added - since microversion 1.6. - - -Request Example ---------------- - -.. literalinclude:: samples/node-recover-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/node-action-response.json - :language: javascript - - -Perform an Operation on a Node -============================== - -.. rest_method:: POST /v1/nodes/{node_id}/ops - - min_version: 1.4 - -Perform the specified operation on the specified node. - -This API is only available since API microversion 1.4. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - node_id: node_id_url - - operation: operation_request - -Request Example ---------------- - -.. literalinclude:: samples/node-operation-request.json - :language: javascript - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - Location: location - - action: action_action - -Response Example ----------------- - -.. literalinclude:: samples/node-action-response.json - :language: javascript diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml deleted file mode 100644 index 5e3060c2f..000000000 --- a/api-ref/source/parameters.yaml +++ /dev/null @@ -1,1546 +0,0 @@ -#### header parameters ####################################################### - -location: - type: string - in: header - required: True - description: | - For asynchronous object operations, the ``location`` header contains a - string that can be interpreted as a relative URI from where users can - track the progress of the action triggered. - -microversion: - type: string - in: header - description: | - API microversion request. It takes the form of - ``OpenStack-API-Version: clustering 1.0``, where ``1.0`` is the requested - API version. 
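As an illustration only (the host and token below are placeholders, not values taken from this document), a request pinned to a specific microversion carries the header like this::

    GET /v1/nodes HTTP/1.1
    Host: clustering.example.com
    X-Auth-Token: {token}
    OpenStack-API-Version: clustering 1.6

Requests that omit the header are served at the default (minimum) microversion, so features gated on a later microversion, such as the ``tainted`` field (1.13) or node adoption (1.7), will not be available.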
- -request_id: - type: string - in: header - description: | - A unique ID for tracking the service request. The request ID associated - with the request appears in the service logs by default. - -#### path parameters ######################################################### - -action_id_url: - type: string - in: path - required: True - description: | - The name, short-ID or UUID that identifies an action object. - -cluster_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of a cluster object. - -event_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of an event object. - -node_id_url: - type: string - in: path - required: True - description: | - The name, short-ID or UUID of a node object. - -path_url: - type: string - in: path - required: True - description: | - A JSON path format string for a node attribute. - -policy_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of a policy object. - -policy_type_url: - type: string - in: path - required: True - description: | - The name of a policy type. - -profile_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of a profile. - -profile_type_url: - type: string - in: path - required: True - description: | - The name of a profile type. - -receiver_id_url: - type: string - in: path - required: True - description: | - The name, UUID or short-UUID of a receiver object. - -version_url: - type: string - in: path - required: True - description: | - A string indicating the major version of Clustering API. - -webhook_id_url: - type: UUID - in: path - required: True - description: | - The UUID of a webhook object. - -#### query parameters ######################################################## - -action_action_query: - type: string - in: query - description: | - Filters the resulting list using the ``action`` field of the object. - -action_name_query: - type: string - in: query - description: | - Filters the response by the action name associated with an event. - Use this filter multiple times to filter by multiple actions. - -action_status_query: - type: string - in: query - description: | - Filters the results by the ``status`` property of an action object. - -action_update_force_query: - type: boolean - in: query - description: | - A boolean indicating if the action update request should be forced. - -cluster_identity_query: - type: string - in: query - description: | - The name, short-ID or UUID of the cluster object. - -enabled_query: - type: string - in: query - description: | - Filters the response by the enabled status of a policy on the cluster. - -global_project: - type: boolean - in: query - default: False - description: | - Indicates whether to include resources for all projects or resources for - the current project in the response. - - If you are an administrative user and you set this value to ``true``, the - call returns all resources from all projects. Default is ``false``, which - returns only resources in the current project. - -limit: - type: integer - in: query - description: | - Requests a page size of resources. Returns a number of resources up to the - limit value. Use the `limit` parameter to make an initial limited request - and use the ID of the last-seen resource from the response as the `marker` - parameter value in a subsequent limited request. - -marker: - type: UUID - in: query - description: | - The ID of the last-seen resource.
Use the `limit` parameter to make an - initial limited request and use the ID of the last-seen resource from the - response as the `marker` parameter value in a subsequent limited request. - -name_query: - type: string - in: query - description: | - Filters the response by the specified ``name`` property of the object, - such as ``policy_name`` or the ``name`` property of a cluster. - -oid_query: - type: string - in: query - description: | - Filters the response by the ``ID`` of the object associated with an event. - Use this filter multiple times to filter by multiple objects. - -oname_query: - type: string - in: query - description: | - Filters the response by the ``name`` of the object associated with an event. - Use this filter multiple times to filter by multiple objects. - -otype_query: - type: string - in: query - description: | - Filters the response by the ``type`` of the object associated with an event. - Use this filter multiple times to filter by multiple objects. A valid - value is ``CLUSTER`` or ``NODE``. - -receiver_action_query: - type: string - in: query - description: | - Filters the response by the action targeted by the receiver. - -receiver_type_query: - type: string - in: query - description: | - Filters the response by the ``type`` property of the receiver. - -receiver_user_query: - type: string - in: query - description: | - Filters the response by the ``user`` property of the receiver. - min_version: 1.4 - -show_details: - type: boolean - in: query - default: False - required: False - description: | - A boolean indicating whether the detailed information about the physical - resource associated with the node object will be returned. - -sort: - type: string - in: query - description: | - Sorts the response by one or more attribute and optional sort direction - combinations. A valid direction is ``asc`` (ascending) or ``desc`` - (descending). Default direction is ``asc`` (ascending). - - Specify the list as ``<key>[:<direction>]``. - - For example, the following query parameters in the URI sort the resources - in the response by ``name`` in ascending order and then by ``status`` in - descending order:: - - GET /v1/clusters?sort=name:asc,status:desc - -status_query: - type: string - in: query - description: | - Filters the resource collection by the ``status`` property. - -target_query: - type: string - in: query - description: | - Filters the results by the UUID of the targeted object which is usually - a cluster. - -type_query: - type: string - in: query - description: | - Filters the response by the specified ``type`` property of the object, - such as ``policy_type`` property of cluster-policy binding object or - ``type`` property of policy object. - -user_query: - type: UUID - in: query - description: | - Filters the response by the ``user`` property of the resource. - -webhook_params: - type: object - in: query - description: | - The query string that forms the inputs to use for the targeted action - for API microversion less than 1.10. - -webhook_version: - type: string - in: query - required: True - description: | - The webhook implementation version requested. - - -#### body parameters ######################################################### - -action: - type: object - in: body - required: True - description: | - A structured definition of an action object. - -action_action: - type: string - in: body - required: True - description: | - A string representation of the action for execution.
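Before moving on to the body parameters, here is a hedged sketch of how the paging and sorting query parameters above combine in practice (the UUID is invented for illustration)::

    GET /v1/clusters?limit=2&sort=name:asc
    GET /v1/clusters?limit=2&sort=name:asc&marker=5d6a7a14-95a9-4f1f-8a33-582101884130

The second request resumes the listing right after the last cluster returned by the first, following the ``limit``/``marker`` pattern described above.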
- -action_data: - type: object - in: body - required: True - description: | - A structured representation of data associated with an action object. - -action_id: - type: UUID - in: body - required: True - description: | - A UUID that uniquely identifies an action object. - -action_name: - type: string - in: body - required: True - description: | - The name of an action object. - -action_owner: - type: string - in: body - required: True - description: | - The UUID of the owning engine that is currently locking the action for - execution. - -action_request: - type: object - in: body - required: True - description: | - A structured definition of an action to be executed. The object is - usually expressed as:: - - <action_name>: { - <param_1>: <value_1>, - <param_2>: <value_2>, - ... - } - - The ``<action_name>`` indicates the requested action while the ``<param>`` - keys provide the associated parameters to the action. Each individual - action has its own set of parameters. - -action_status: - type: string - in: body - required: True - description: | - A string representation of the current status of the action. - -action_status_update: - type: string - in: body - required: True - description: | - A string representation of the action status to update. CANCELLED is - the only valid status at this time. - -action_target: - type: string - in: body - required: True - description: | - The UUID of the targeted object (which is usually a cluster). - -action_timeout: - type: integer - in: body - required: True - description: | - The number of seconds after which an unfinished action execution will be - treated as a timeout. - -actions: - type: array - in: body - required: True - description: | - A list of action objects. - -adjustment_max_size: - type: integer - in: body - description: | - The value to be set as the new ``max_size`` of the cluster. - -adjustment_min_size: - type: integer - in: body - description: | - The value to be set as the new ``min_size`` of the cluster. - -adjustment_min_step: - type: integer - in: body - description: | - When ``adjustment_type`` is set to ``CHANGE_IN_PERCENTAGE``, often - the computed value is a float which could be less than 1.0. The - ``min_step`` can be used to specify that at least this number of nodes will - be added or removed. - -adjustment_number: - type: number - in: body - description: | - The number of the adjustment. The interpretation of the value depends on the - value of the ``adjustment_type`` parameter. This parameter is mandatory - when ``adjustment_type`` is specified. Otherwise, it is optional. - - When ``adjustment_type`` is specified as ``CHANGE_IN_PERCENTAGE``, the - value of this parameter can be a float number, otherwise it has to be an - integer. - -adjustment_strict: - type: boolean - in: body - default: False - description: | - There are cases where the computed number of nodes to adjust will break - the size constraints of a cluster, i.e. its ``min_size`` or ``max_size`` - property. If this is the case, the ``strict`` parameter can further - instruct the senlin engine whether the resize should be done on a best - effort basis. If the value is set to True, the senlin engine will perform - the resize operation while respecting the cluster's size constraints. - Otherwise, if the computed adjustment will break the size constraints, the - resize request will be directly rejected. - -adjustment_type: - type: string - in: body - description: | - The type of size adjustment. The valid values are: - - - ``EXACT_CAPACITY``: The adjustment number specified is to be interpreted - as the targeted ``desired_capacity``.
This value has to be a non-negative - integer. - - ``CHANGE_IN_CAPACITY``: The adjustment number specified is to be treated - as the number of nodes to add or remove. The value has to be a non-zero - integer. A positive number can be used to specify the number of nodes to - add while a negative number can be specified to indicate the number of - nodes to remove. - - ``CHANGE_IN_PERCENTAGE``: The adjustment number will be interpreted as - a percentage relative to a cluster's current ``desired_capacity``. The - adjustment number can be a positive or negative float value. - - This parameter is optional when a resize request is only about changing the - ``min_size`` and/or ``max_size`` of the cluster. Otherwise, it is required. - When this parameter is specified, the ``number`` parameter has to be - provided as well. - -attr_value: - type: object - in: body - description: | - The attribute value on a specific node. The value could be of any data - type that is valid for the attribute. - -binary: - type: string - in: body - required: True - description: | - The binary name of the service. - -build_info: - type: object - in: body - required: True - description: | - Build information for a Senlin deployment. - -build_info_api: - type: object - in: body - required: True - description: | - Revision information of Senlin API service. - -build_info_engine: - type: object - in: body - required: True - description: | - Revision information of Senlin engine service. - -cause: - type: string - in: body - required: True - description: | - An explanation why an action was started. - -check_params: - type: object - in: body - description: | - The optional parameters provided to a cluster check operation. The detailed - keys and values are not checked at the moment. - -cluster: - type: object - in: body - required: True - description: | - The structured definition of a cluster object. - -cluster_attributes: - type: array - in: body - required: True - description: | - A list of dictionaries each containing the node ID and the corresponding - attribute value. - -cluster_config: - type: object - in: body - required: True - description: | - The structured config associated with the cluster. - -cluster_config_req: - type: object - in: body - required: False - description: | - The structured config associated with the cluster. - -cluster_data: - type: object - in: body - required: True - description: | - The structured data associated with the cluster. - -cluster_id: - type: UUID - in: body - required: True - description: | - The UUID of the cluster object. - -cluster_identity: - type: UUID - in: body - required: False - description: | - The ID, short ID or name of a cluster which the adopted node is supposed - to join. - -cluster_member_nodes: - type: array - in: body - required: True - description: | - The candidate nodes to be added to or removed from a cluster. The meaning - of the parameter depends on the action requested. - - Each item in the list can be the name, the short-ID or the UUID of a node. - -cluster_name: - type: string - in: body - required: True - description: | - The name of a cluster object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -cluster_nodes: - type: array - in: body - required: True - description: | - A list of the UUIDs of node objects which are members of the current - cluster.
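Pulling the ``adjustment_*`` parameters above together, a cluster resize action body might look like the following sketch (values are illustrative, in the spirit of the ``cluster-resize-request.json`` sample)::

    {
        "resize": {
            "adjustment_type": "CHANGE_IN_CAPACITY",
            "number": 2,
            "min_size": 1,
            "max_size": 10,
            "strict": true
        }
    }

With ``CHANGE_IN_CAPACITY``, a ``number`` of 2 asks for two nodes to be added, subject to the ``min_size``/``max_size`` constraints and the ``strict`` handling described above.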
- -cluster_operation_request: - type: object - in: body - required: True - description: | - A structured definition of an operation to be performed. The object is - usually expressed as:: - - <operation_name>: { - filters: { - <filter_1>: <value_1>, - <filter_2>: <value_2> - }, - params: { - <param_1>: <value_1>, - <param_2>: <value_2>, - ... - } - } - - The ``<operation_name>`` specifies the operation to be performed, in which - the ``filters`` object contains a collection of filtering rules, and the - ``params`` object provides the parameters (if any) to the operation. - Each individual operation has its own set of parameters, as supported by - the profile type of the target cluster. - -cluster_policies: - type: array - in: body - required: True - description: | - A list of cluster_policy objects. - -cluster_policies_property: - type: array - in: body - required: True - description: | - A list of UUIDs of the policies attached to the current cluster. - -cluster_policy: - type: object - in: body - required: True - description: | - The structured description of a cluster_policy object. - -cluster_policy_enabled: - type: boolean - in: body - required: True - description: | - Whether the policy is enabled on the attached cluster. - -cluster_policy_id: - type: UUID - in: body - required: True - description: | - The UUID of a cluster_policy object. - -cluster_replace_nodes: - type: object - in: body - required: True - description: | - A collection of key-value pairs. Each key is the node of a cluster to be - replaced; each value is the node used to replace the original one. - - Each item of the key-value pairs can be the name, the short-ID or the - UUID of a node. - -cluster_status: - type: string - in: body - required: True - description: | - The string representation of the current status of the cluster. - -clusters: - type: array - in: body - required: True - description: | - A list of cluster objects. - -created_at: - type: string - in: body - required: True - description: | - The date and time when the object was created. The date and time stamp - format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. For example: - ``2016-01-18T00:00:00Z`` - -depended_by: - type: array - in: body - required: True - description: | - A list of UUIDs of the actions that depend on the current action. - -dependents: - type: object - in: body - required: True - description: | - A dict containing dependency information between nova server, heat stack - cluster and container cluster. - -depends_on: - type: array - in: body - required: True - description: | - A list of UUIDs of the actions that the current action depends on. - -desired_capacity: - type: integer - in: body - required: True - description: | - The desired capacity of a cluster. When creating a cluster, this value is - set to 0 by default. - -destroy_after_deletion: - type: boolean - in: body - required: False - description: | - Whether deleted nodes should be destroyed right away. - min_version: 1.4 - -disabled_reason: - type: string - in: body - required: False - description: | - The reason for disabling a service. - -domain: - type: UUID - in: body - required: True - description: | - The ID of the domain a resource is created in. - -end_time: - type: float - in: body - required: True - description: | - A floating point number that represents the time when an action's - execution completed. - -event: - type: object - in: body - required: True - description: | - The structured description of an event object. - -event_id: - type: UUID - in: body - required: True - description: | - The UUID of an event object.
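As a concrete, hypothetical rendering of the ``cluster_operation_request`` structure described above, an operation request against a cluster of nova servers might look like::

    {
        "reboot": {
            "filters": {
                "role": "slave"
            },
            "params": {
                "type": "SOFT"
            }
        }
    }

Here ``reboot`` and its ``type`` parameter are assumed to be supported by the profile type of the target cluster; the actual operation names and parameters are profile-type specific, as noted above.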
- -event_level: - type: string - in: body - required: True - description: | - The level of an event object. - -event_level_req: - type: string - in: body - required: False - description: | - The level of an event object. - -event_status: - type: string - in: body - required: True - description: | - The current status of the object associated with the event. - -event_timestamp: - type: string - in: body - required: True - description: | - The date and time when the event was generated. The date and time stamp - format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. - -events: - type: array - in: body - required: True - description: | - A list of event objects. - -host: - type: string - in: body - required: True - description: | - The name of the host. - -identity: - type: string - in: body - required: True - description: | - The ID or name of the physical resource to be adopted. - -index: - type: integer - in: body - required: True - description: | - An integer that uniquely identifies a node within its owning cluster. - -init_at: - type: string - in: body - required: True - description: | - The date and time when the object was initialized. The date and - time stamp format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. For example: - ``2016-01-18T00:00:00Z`` - -inputs: - type: object - in: body - required: True - description: | - A collection of key-value pairs that are fed to the action as input - parameters. - -interval: - type: integer - in: body - required: True - description: | - An integer that indicates the interval in seconds between two consecutive - executions of a repeatable action. - -lifecycle_token_id: - type: UUID - in: body - required: True - description: | - The UUID of the lifecycle action to be completed. - -max_size: - type: integer - in: body - required: True - description: | - The maximum size of a cluster, i.e. the maximum number of nodes that can - be members of the cluster. A value of -1 means that the cluster doesn't - have an upper bound regarding the number of member nodes. - -max_size_req: - type: integer - default: -1 - in: body - required: False - description: | - The maximum size of a cluster, i.e. the maximum number of nodes that can - be members of the cluster. A value of -1 means that the cluster doesn't - have an upper bound regarding the number of member nodes. - -metadata: - type: object - in: body - required: True - description: | - A collection of key-value pairs associated with an object. - -metadata_req: - type: object - in: body - description: | - A collection of key-value pairs associated with an object. - -min_size: - type: integer - in: body - required: True - description: | - The minimum size of a cluster, i.e. the minimum number of nodes that can - be members of the cluster. - -min_size_req: - type: integer - default: 0 - in: body - required: False - description: | - The minimum size of a cluster, i.e. the minimum number of nodes that can - be members of the cluster. - -name: - type: string - in: body - required: True - description: - The name of the object in question. - -name_req: - type: string - in: body - required: False - description: - The new name of the object in question. - -node: - type: object - in: body - required: True - description: | - A structured description of a node object. - -node_cluster_identity: - type: string - in: body - required: False - description: | - The name, short-ID or UUID of the cluster object a node belongs to. 
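Tying ``lifecycle_token_id`` back to the ``complete_lifecycle`` cluster action documented earlier, the request body is a sketch along these lines (the token UUID is invented; compare the ``cluster-complete-lifecycle-request.json`` sample)::

    {
        "complete_lifecycle": {
            "lifecycle_action_token": "9f1c0f27-79e8-4b81-b8ed-64f8c2c3a7e5"
        }
    }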
- -node_data: - type: object - in: body - required: True - description: | - A map containing key-value pairs associated with a node object. - -node_id: - type: UUID - in: body - required: True - description: | - A UUID string that uniquely identifies a node object. - -node_name: - type: string - in: body - required: True - description: | - The name of a node object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -node_name_adopt: - type: string - in: body - required: False - description: | - The name of a node object. If specified, the name must start with an ASCII - letter and can contain ASCII letters, digits, underscores, periods, and - hyphens and its length must be less than 255. - -node_preview: - type: object - in: body - required: True - description: | - A structured representation of the node to be adopted. Note this is a - preview version which only contains the spec of the profile to be created. - -node_status: - type: string - in: body - required: True - description: | - The string representation of the current status of the node object. - -nodes: - type: array - in: body - required: True - description: | - A list of node objects. - -oid: - type: UUID - in: body - required: True - description: | - The UUID of an object associated with the event. - -oname: - type: string - in: body - required: True - description: | - The name of an object associated with the event. - -operation_request: - type: object - in: body - required: True - description: | - A structured definition of an operation to be performed. The object is - usually expressed as:: - - <operation_name>: { - <param_1>: <value_1>, - <param_2>: <value_2>, - ... - } - - The ``<operation_name>`` specifies the operation to be performed while the - ``<param>`` keys provide the parameters (if any) to the operation. Each - individual operation has its own set of parameters, as supported by the - profile type of the target cluster or node. - -operations: - type: object - in: body - required: True - description: | - A dictionary containing the description of operations (and parameters) - supported by a profile type. - -otype: - type: string - in: body - required: True - description: | - The type of an object associated with the event. - -outputs: - type: object - in: body - required: True - description: | - A collection of key-value pairs that were produced during the execution of - an action as its outputs. - -overrides: - type: object - in: body - required: False - description: | - If specified, provides a collection of key-value pairs that will override - the property names and values extracted from the spec of the existing - physical node. - -physical_id: - type: UUID - in: body - required: True - description: | - The UUID of the physical resource represented by the node object. - -policies: - type: array - in: body - required: True - description: | - A list of policy objects. - -policy: - type: object - in: body - required: True - description: | - A structured description of a policy object. - -policy_data: - type: object - in: body - required: True - description: | - A structured representation of data associated with a policy object. - -policy_id: - type: UUID - in: body - required: True - description: | - The UUID of a policy object. - -policy_identity: - type: string - in: body - required: True - description: | - The name, UUID or short-UUID of a policy object. - -policy_name: - type: string - in: body - required: True - description: | - The name of a policy object.
The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -policy_spec: - type: object - in: body - required: True - description: | - The detailed specification of a policy object. - -policy_type: - type: object - in: body - required: True - description: | - A structured description of a policy type. Since API micro-version 1.5, - a "support_status" property is returned which contains a list - of support status changes. - -policy_type_name: - type: string - in: body - required: True - description: | - The name of the policy type. - -policy_type_schema: - type: object - in: body - required: True - description: | - The schema of a policy type. The schema of a policy type varies a lot - based on the specific type implementation. - -policy_types: - type: array - in: body - required: True - description: | - A list of policy_type objects. Since API micro-version 1.5, each record - in the list will have a "support_status" property which contains a list - of support status changes. - -profile: - type: object - in: body - required: True - description: | - A structured description of a profile object. - -profile_id: - type: UUID - in: body - required: True - description: | - The UUID of the profile. - -profile_identity: - type: string - in: body - required: False - description: | - The name, short-ID, or UUID of a profile. - -profile_identity_req: - type: string - in: body - required: True - description: | - The name, short-ID, or UUID of a profile. - -profile_name: - type: string - in: body - required: True - description: | - The name of a profile object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -profile_only: - type: boolean - in: body - required: False - description: | - Whether the update of profile is limited to the target cluster. All nodes - in the cluster will be updated with the specified new profile if this - parameter is set to False. The default value is False. - min_version: 1.6 - -profile_spec: - type: object - in: body - required: True - description: | - The detailed specification of the profile. - -profile_type: - type: object - in: body - required: True - description: | - A structured description of a profile type. Since API micro-version 1.5, - a "support_status" property is returned which contains a list - of support status changes. - -profile_type_name: - type: string - in: body - required: True - description: | - The name of the profile type. - -profile_type_schema: - type: object - in: body - required: True - description: | - The schema of a profile type. The schema of a profile type varies - a lot based on the specific type implementation. All profile types - share the ``context`` property which is a dictionary for customizing - the request context to authenticate with a backend service. A common - usage of this property is to set the ``region_name`` in the dictionary - so that a node can be created in the specified region. All other - properties are defined by a particular profile type implementation. - -profile_type_version: - type: string - in: body - required: True - description: | - The version of the profile type. - -profile_types: - type: array - in: body - required: True - description: | - A list of profile_type objects. 
Since API micro-version 1.5, each record - in the list will have a "support_status" property which contains a list - of support status changes. - -profiles: - type: array - in: body - required: True - description: | - A list of profile objects. - -project: - type: UUID - in: body - required: True - description: | - The ID of the project a resource is created in. - -receiver: - type: object - in: body - required: True - description: | - The structured definition of a receiver object. - -receiver_action: - type: string - in: body - description: | - The action to initiate when the receiver is triggered. A valid value - should be the name of an action that can be applied on a cluster. - -receiver_action_req: - type: string - in: body - required: False - description: | - The action to initiate when the receiver is triggered. A valid value - should be the name of an action that can be applied on a cluster. - -receiver_actor: - type: object - in: body - required: False - description: | - A map of key and value pairs to use for authentication. - -receiver_channel: - type: object - in: body - required: True - description: | - The target to be used by a user to trigger a receiver. For the webhook type - of receiver, the channel is a webhook URL. - -receiver_cluster_identity: - type: string - in: body - description: | - The name, short-ID or UUID of the cluster object targeted by the receiver. - -receiver_id: - type: UUID - in: body - required: True - description: | - The UUID of the receiver object. - -receiver_name: - type: string - in: body - required: True - description: | - The name of a receiver object. The name must start with an ASCII letter - and can contain ASCII letters, digits, underscores, periods, and hyphens - and its length must be less than 255. - -receiver_params: - type: object - in: body - required: True - description: | - A map of key and value pairs to use for action creation. - -receiver_params_req: - type: object - in: body - required: False - description: | - A map of key and value pairs to use for action creation. Some actions - might require certain input parameters. - -receiver_type: - type: string - in: body - required: True - description: | - The type of the receiver. - -receiver_type_req: - type: string - in: body - required: True - description: | - The type of the receiver. The valid values include ``webhook`` and - ``message``. - -receivers: - type: array - in: body - required: True - description: | - A list of receiver objects. - -recover_params: - type: object - in: body - description: | - The optional parameters provided to a cluster recover operation. The - detailed keys and values are not checked at the moment. - -role: - type: string - in: body - required: True - description: | - A string describing the role played by a node inside a cluster. - -role_req: - type: string - in: body - description: | - A string describing the new role played by a node inside a cluster. - -scale_count: - type: integer - in: body - default: 1 - description: | - The number of new nodes to add to or remove from the specified cluster. - The interpretation depends on the action requested. Default value is - 1. - -service_id: - type: UUID - in: body - required: True - description: | - A UUID that uniquely identifies a service object. - -service_state: - type: string - in: body - required: True - description: | - The state of the service. One of ``up`` or ``down``. - -service_status: - type: string - in: body - required: True - description: | - The status of the service.
One of ``enabled`` or ``disabled``. - -services: - type: array - in: body - required: True - description: | - A list of services. - -snapshot: - type: bool - in: body - required: False - description: | - A flag indicating whether a snapshot of the existing physical object should - be created before the object is adopted as a node. - -start_time: - type: float - in: body - required: True - description: | - A floating point number that represents the time when an action started - execution. - -status_reason: - type: string - in: body - required: True - description: | - The string representation of the reason why the object has transitioned to - its current status. - -tainted: - type: bool - in: body - required: True - description: | - A boolean indicating whether a node is considered tainted. Tainted nodes - are selected first during scale-in operations. This field is only - returned starting with API microversion 1.13 or greater. - -tainted_req: - type: bool - in: body - required: False - description: | - A boolean indicating whether a node is considered tainted. Tainted nodes - are selected first during scale-in operations. This parameter is only - accepted starting with API microversion 1.13 or greater. - -timeout: - type: integer - in: body - required: True - description: | - The default timeout value (in seconds) of cluster operations. - -timeout_req: - type: integer - in: body - required: False - description: | - The new timeout value (in seconds) of cluster operations. - -topic: - type: string - in: body - required: True - description: | - The topic name of the service. - -updated_at: - type: string - in: body - required: True - description: | - The date and time when the object was last updated. The date and time - stamp format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. For example: - ``2016-01-18T00:00:00Z`` - -user: - type: UUID - in: body - required: True - description: | - The ID of the user an object is created by. - -version: - type: object - in: body - required: True - description: | - The details about a major API version. - -version_id: - type: string - in: body - required: True - description: | - The string representation of an API version number, e.g. ``1.0``. - -version_links: - type: array - in: body - required: True - description: | - A list of relative URLs to different version objects. - -version_max_version: - type: string - in: body - required: True - description: | - The string representation of the maximum microversion supported. - -version_media_types: - type: array - in: body - required: True - description: | - A list of content-type based media types supported. - -version_min_version: - type: string - in: body - required: True - description: | - The string representation of the minimum microversion supported. - -version_status: - type: string - in: body - required: True - description: | - A string indicating the supporting status of the version. - -version_updated: - type: string - in: body - required: True - description: | - The date and time when the version was last updated. The date and time - stamp format is ISO8601: ``CCYY-MM-DDThh:mm:ssZ``. For example: - ``2016-01-18T00:00:00Z`` - -versions: - type: array - in: body - required: True - description: | - A list of supported major API versions.
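Before the policy APIs, one last sketch shows how the adoption-related parameters above (``identity``, ``snapshot``, ``overrides``) combine in a node adopt request body. The server ID is invented and the field set is an assumption modeled on the ``node-adopt-request.json`` sample::

    {
        "identity": "65b6a632-6e46-4b6a-8b7d-2c7e7a6ba049",
        "type": "os.nova.server-1.0",
        "name": "adopted-server-node",
        "snapshot": true,
        "overrides": {},
        "metadata": {}
    }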
diff --git a/api-ref/source/policies.inc b/api-ref/source/policies.inc deleted file mode 100644 index f0c28e6dd..000000000 --- a/api-ref/source/policies.inc +++ /dev/null @@ -1,358 +0,0 @@ -=================== -Policies (policies) -=================== - -Lists all policies and creates, shows information for, updates, and deletes a -policy. - - -List policies -============= - -.. rest_method:: GET /v1/policies - -Lists all policies. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - name: name_query - - type: type_query - -The sorting keys include ``name``, ``type``, ``created_at`` and -``updated_at``. - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - policies: policies - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-list-response.json - :language: javascript - - -Create policy -============= - -.. rest_method:: POST /v1/policies - -Creates a policy. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy: policy - - name: policy_name - - spec: policy_spec - -Request Example --------------- - -.. literalinclude:: samples/policy-create-request.json - :language: javascript - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - policy: policy - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-create-response.json - :language: javascript - - -Show policy details -=================== - -.. rest_method:: GET /v1/policies/{policy_id} - -Shows details for a policy. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy_id: policy_id_url - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - policy: policy - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-show-response.json - :language: javascript - - -Update policy -============= - -.. rest_method:: PATCH /v1/policies/{policy_id} - -Updates a policy. - -Response Codes -------------- - -.. 
rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy_id: policy_id_url - - policy: policy - - name: name - -Note that the only property that can be updated on a policy object after -creation is ``name``. - -Request Example ---------------- - -.. literalinclude:: samples/policy-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - policy: policy - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-update-response.json - :language: javascript - - -Delete policy -============= - -.. rest_method:: DELETE /v1/policies/{policy_id} - -Deletes a policy. - -Response Codes --------------- - -A policy cannot be deleted if it is still attached to cluster(s). In that -case, a 409 error will be returned. - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy_id: policy_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - -Validate policy -=============== - -.. rest_method:: POST /v1/policies/validate - -Validates a policy. - -This API is only available since API microversion 1.2. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy: policy - - spec: policy_spec - -Request Example ---------------- - -.. literalinclude:: samples/policy-validate-request.json - :language: javascript - -Response Parameters -------------------- - -The response contains properties as if the policy has been created. - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - policy: policy - - created_at: created_at - - data: policy_data - - domain: domain - - id: policy_id - - name: name - - project: project - - spec: policy_spec - - type: policy_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/policy-validate-response.json - :language: javascript diff --git a/api-ref/source/policy_types.inc b/api-ref/source/policy_types.inc deleted file mode 100644 index 7b312c953..000000000 --- a/api-ref/source/policy_types.inc +++ /dev/null @@ -1,111 +0,0 @@ -=========================== -Policy Types (policy-types) -=========================== - -Lists all policy types and shows details for a policy type. - -List policy types -================= - -.. rest_method:: GET /v1/policy-types - -Lists all supported policy types. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - policy_types: policy_types - -Response Example ----------------- - -For API microversion lower than 1.5, the response only contains the name for -each policy type: - -.. literalinclude:: samples/policy-types-list-response.json - :language: javascript - -Since API microversion 1.5, the response contains the support status of each -policy type and the version is provided using a separate key: - -.. literalinclude:: samples/policy-types-list-response-v1.5.json - :language: javascript - - -Show policy type details -======================== - -.. rest_method:: GET /v1/policy-types/{policy_type} - -Shows details for a policy type. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - policy_type: policy_type_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - policy_type: policy_type - - name: policy_type_name - - schema: policy_type_schema - -Response Example ----------------- - -For API microversion lower than 1.5, the response only contains the name and -schema of the specified policy type: - -.. literalinclude:: samples/policy-type-show-response.json - :language: javascript - -Since API microversion 1.5, the response contains the support status of the -specified policy type: - -.. literalinclude:: samples/policy-type-show-response-v1.5.json - :language: javascript - diff --git a/api-ref/source/profile_types.inc b/api-ref/source/profile_types.inc deleted file mode 100644 index 2d8b2aa11..000000000 --- a/api-ref/source/profile_types.inc +++ /dev/null @@ -1,156 +0,0 @@ -============================= -Profile Types (profile-types) -============================= - -Lists all profile types and shows details for a profile type. - -List profile types -================== - -.. rest_method:: GET /v1/profile-types - -Lists supported profile types. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile_types: profile_types - -Response Example ----------------- - -For API microversion lower than 1.5, the response only contains the name for -each profile type: - -.. literalinclude:: samples/profile-types-list-response.json - :language: javascript - -Since API microversion 1.5, the response contains the support status of each -profile type and the version is provided using a separate key: - -.. literalinclude:: samples/profile-types-list-response-v1.5.json - :language: javascript - - -Show profile type details -========================= - -.. rest_method:: GET /v1/profile-types/{profile_type} - -Shows details for a profile type. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. 
rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_type: profile_type_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile_type: profile_type - - name: profile_type_name - - schema: profile_type_schema - -Response Example ----------------- - -For API microversion lower than 1.5, the response only contains the name and -schema of the specified profile type: - -.. literalinclude:: samples/profile-type-show-response.json - :language: javascript - -Since API microversion 1.5, the response contains the support status of the -specified profile type: - -.. literalinclude:: samples/profile-type-show-response-v1.5.json - :language: javascript - - -List profile type operations -============================ - -.. rest_method:: GET /v1/profile-types/{profile_type}/ops - -List operations and parameters supported by a profile type. - -This API is only available since API microversion 1.4. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_type: profile_type_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - operations: operations - -Response Example ----------------- - -.. literalinclude:: samples/profile-type-ops-response.json - :language: javascript diff --git a/api-ref/source/profiles.inc b/api-ref/source/profiles.inc deleted file mode 100644 index e7371f1dd..000000000 --- a/api-ref/source/profiles.inc +++ /dev/null @@ -1,356 +0,0 @@ -=================== -Profiles (profiles) -=================== - -Lists all profiles and creates, shows information for, updates, and deletes a -profile. - -List profiles -============= - -.. rest_method:: GET /v1/profiles - -Lists all profiles. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - global_project: global_project - - limit: limit - - marker: marker - - name: name_query - - sort: sort - - type: type_query - -The sorting keys include ``name``, ``type``, ``created_at`` and -``updated_at``. - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profiles: profiles - - created_at: created_at - - domain: domain - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-list-response.json - :language: javascript - - -Create profile -============== - -.. rest_method:: POST /v1/profiles - -Creates a profile. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile: profile - - name: profile_name - - metadata: metadata_req - - spec: profile_spec - -Request Example ---------------- - -.. literalinclude:: samples/profile-create-request.json - :language: javascript - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile: profile - - created_at: created_at - - domain: domain - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-create-response.json - :language: javascript - - -Show profile details -==================== - -.. rest_method:: GET /v1/profiles/{profile_id} - -Shows details for a profile. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_id: profile_id_url - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile: profile - - created_at: created_at - - domain: domain - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-show-response.json - :language: javascript - - -Update profile -============== - -.. rest_method:: PATCH /v1/profiles/{profile_id} - -Updates a profile. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_id: profile_id_url - - profile: profile - - metadata: metadata_req - - name: name_req - -Request Example ---------------- - -.. literalinclude:: samples/profile-update-request.json - :language: javascript - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile: profile - - created_at: created_at - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-update-response.json - :language: javascript - - -Delete profile -============== - -.. rest_method:: DELETE /v1/profiles/{profile_id} - -Deletes a profile. - -Response Codes --------------- - -A profile cannot be deleted if it is still used by a node or a cluster. In that -case, a 409 error will be returned. - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 409 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile_id: profile_id_url - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - -Validate profile -================ - -.. 
rest_method:: POST /v1/profiles/validate - -Validates a profile. - -This API is only available since API microversion 1.2. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - profile: profile - - spec: profile_spec - -Request Example ---------------- - -.. literalinclude:: samples/profile-validate-request.json - :language: javascript - -Response Parameters ------------------- - -The response contains properties as if the profile had been created. - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - profile: profile - - created_at: created_at - - domain: domain - - id: profile_id - - metadata: metadata - - name: name - - project: project - - spec: profile_spec - - type: profile_type_name - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/profile-validate-response.json - :language: javascript diff --git a/api-ref/source/receivers.inc b/api-ref/source/receivers.inc deleted file mode 100644 index bc1938490..000000000 --- a/api-ref/source/receivers.inc +++ /dev/null @@ -1,360 +0,0 @@ -===================== -Receivers (receivers) -===================== - -Lists all receivers and creates, shows information for, and deletes a receiver. - - -List receivers -============== - -.. rest_method:: GET /v1/receivers - -Lists all receivers. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - limit: limit - - marker: marker - - sort: sort - - global_project: global_project - - name: name_query - - type: receiver_type_query - - cluster_id: cluster_identity_query - - action: receiver_action_query - - user: receiver_user_query - -The sorting keys include ``name``, ``type``, ``action``, ``cluster_id``, -``created_at`` and ``user``. - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - receivers: receivers - - action: receiver_action - - actor: receiver_actor - - channel: receiver_channel - - cluster_id: cluster_id - - created_at: created_at - - domain: domain - - id: receiver_id - - name: name - - params: receiver_params - - project: project - - type: receiver_type - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/receivers-list-response.json - :language: javascript - - -Create receiver -=============== - -.. rest_method:: POST /v1/receivers - -Creates a receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 201 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 500 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver: receiver - - name: receiver_name - - cluster_id: receiver_cluster_identity - - type: receiver_type_req - - action: receiver_action - - actor: receiver_actor - - params: receiver_params_req - -Request Example ---------------- - -.. literalinclude:: samples/receiver-create-request.json - :language: javascript - - -Response Parameters ------------------- - -.. 
rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - receiver: receiver - - action: receiver_action - - actor: receiver_actor - - channel: receiver_channel - - cluster_id: cluster_id - - created_at: created_at - - domain: domain - - id: receiver_id - - name: name - - params: receiver_params - - project: project - - type: receiver_type - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/receiver-create-response.json - :language: javascript - - -Show receiver details -===================== - -.. rest_method:: GET /v1/receivers/{receiver_id} - -Shows details for a receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver_id: receiver_id_url - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - receiver: receiver - - action: receiver_action - - actor: receiver_actor - - channel: receiver_channel - - cluster_id: cluster_id - - created_at: created_at - - domain: domain - - id: receiver_id - - name: name - - params: receiver_params - - project: project - - type: receiver_type - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/receiver-show-response.json - :language: javascript - - -Update receiver -================== - -.. rest_method:: PATCH /v1/receivers/{receiver_id} - - min_version: 1.7 - -Updates a receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver_id: receiver_id_url - - receiver: receiver - - name: name_req - - action: receiver_action_req - - params: receiver_params_req - - -Request Example ---------------- - -.. literalinclude:: samples/receiver-update-request.json - :language: javascript - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - receiver: receiver - - action: receiver_action - - actor: receiver_actor - - channel: receiver_channel - - cluster_id: cluster_id - - created_at: created_at - - domain: domain - - id: receiver_id - - name: name - - params: receiver_params - - project: project - - type: receiver_type - - updated_at: updated_at - - user: user - -Response Example ----------------- - -.. literalinclude:: samples/receiver-update-response.json - :language: javascript - - -Delete receiver -=============== - -.. rest_method:: DELETE /v1/receivers/{receiver_id} - -Deletes a receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 204 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -This operation does not accept a request body. - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver_id: receiver_id_url - - -Response Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - -This operation does not return a response body. 
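The receiver operations above are plain authenticated HTTP calls. As a minimal usage sketch (assuming a deployment-specific endpoint in ``$SENLIN_URL`` and a valid Keystone token in ``$OS_TOKEN``, both of which are illustrative placeholders rather than part of this API reference; the receiver ID is taken from the samples later in this document), the delete operation reduces to a single request::

    # Delete a receiver by ID; a 204 response confirms the deletion.
    curl -i -X DELETE "$SENLIN_URL/v1/receivers/573aa1ba-bf45-49fd-907d-6b5d6e6adfd3" \
         -H "X-Auth-Token: $OS_TOKEN" \
         -H "OpenStack-API-Version: clustering 1.7"

The ``OpenStack-API-Version`` header is optional for this call; it only pins the microversion negotiated with the API.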
- - -Notify receiver -=============== - -.. rest_method:: POST /v1/receivers/{receiver_id}/notify - -Notifies a message type receiver. - -Response Codes --------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 404 - - 503 - -Request Parameters ------------------- - -This operation does not accept a request body. - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - receiver_id: receiver_id_url - - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - -This operation does not return a response body. diff --git a/api-ref/source/samples/action-get-request.json b/api-ref/source/samples/action-get-request.json deleted file mode 100644 index c3d511d00..000000000 --- a/api-ref/source/samples/action-get-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "action": { - "status": "CANCELLED" - } -} diff --git a/api-ref/source/samples/action-get-response.json b/api-ref/source/samples/action-get-response.json deleted file mode 100644 index 56d4c90fa..000000000 --- a/api-ref/source/samples/action-get-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "action": { - "action": "CLUSTER_DELETE", - "cause": "RPC Request", - "created_at": "2015-06-27T05:09:43Z", - "data": {}, - "depended_by": [], - "depends_on": [], - "end_time": 1423570000.0, - "id": "ffbb9175-d510-4bc1-b676-c6aba2a4ca81", - "inputs": {}, - "interval": -1, - "name": "cluster_delete_fcc9b635", - "outputs": {}, - "owner": null, - "project": "f1fe61dcda2f4618a14c10dc7abc214d", - "start_time": 1423570000.0, - "status": "FAILED", - "status_reason": "Cluster action FAILED", - "target": "fcc9b635-52e3-490b-99f2-87b1640e4e89", - "timeout": 3600, - "updated_at": null, - "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" - } -} diff --git a/api-ref/source/samples/actions-list-response.json b/api-ref/source/samples/actions-list-response.json deleted file mode 100644 index 83fbb57bd..000000000 --- a/api-ref/source/samples/actions-list-response.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "actions": [ - { - "action": "NODE_CREATE", - "cause": "RPC Request", - "created_at": "2015-12-04T04:54:41Z", - "data": {}, - "depended_by": [], - "depends_on": [], - "end_time": 1425550000.0, - "id": "2366d440-c73e-4961-9254-6d1c3af7c167", - "inputs": {}, - "interval": -1, - "name": "node_create_0df0931b", - "outputs": {}, - "owner": null, - "project": "f1fe61dcda2f4618a14c10dc7abc214d", - "start_time": 1425550000.0, - "status": "SUCCEEDED", - "status_reason": "Action completed successfully.", - "target": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "timeout": 3600, - "updated_at": null, - "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" - }, - { - "action": "NODE_DELETE", - "cause": "RPC Request", - "created_at": "2015-11-04T05:21:41Z", - "data": {}, - "depended_by": [], - "depends_on": [], - "end_time": 1425550000.0, - "id": "edce3528-864f-41fb-8759-f4707925cc09", - "inputs": {}, - "interval": -1, - "name": "node_delete_f0de9b9c", - "outputs": {}, - "owner": null, - "project": "f1fe61dcda2f4618a14c10dc7abc214d", - "start_time": 1425550000.0, - "status": "SUCCEEDED", - "status_reason": "Action completed successfully.", - "target": "f0de9b9c-6d48-4a46-af21-2ca8607777fe", - "timeout": 3600, - "updated_at": null, - "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" - } - ] -} diff --git a/api-ref/source/samples/build-show-response.json b/api-ref/source/samples/build-show-response.json deleted file mode 100644 index 
a5ddde4c0..000000000 --- a/api-ref/source/samples/build-show-response.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "build_info": { - "api": { - "revision": "1.0" - }, - "engine": { - "revision": "2.0" - } - } -} diff --git a/api-ref/source/samples/cluster-action-response.json b/api-ref/source/samples/cluster-action-response.json deleted file mode 100644 index 1182ce558..000000000 --- a/api-ref/source/samples/cluster-action-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "action": "2a0ff107-e789-4660-a122-3816c43af703" -} \ No newline at end of file diff --git a/api-ref/source/samples/cluster-add-nodes-request.json b/api-ref/source/samples/cluster-add-nodes-request.json deleted file mode 100644 index e002fb8ac..000000000 --- a/api-ref/source/samples/cluster-add-nodes-request.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "add_nodes": { - "nodes": [ - "node-1234", - "node-5678" - ] - } -} diff --git a/api-ref/source/samples/cluster-attach-policy-request.json b/api-ref/source/samples/cluster-attach-policy-request.json deleted file mode 100644 index 8612d638b..000000000 --- a/api-ref/source/samples/cluster-attach-policy-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "policy_attach": { - "policy_id": "dp01", - "enabled": false - } -} diff --git a/api-ref/source/samples/cluster-attrs-list-response.json b/api-ref/source/samples/cluster-attrs-list-response.json deleted file mode 100644 index ee2f5b503..000000000 --- a/api-ref/source/samples/cluster-attrs-list-response.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "cluster_attributes": [ - { - "id": "28b1771d-5aaf-4692-b701-fd216b4fd9e9", - "value": "10.0.0.12" - }, - { - "id": "02db8741-03c5-466c-98a0-b83d4bb92c8c", - "value": "10.0.0.13" - }, - { - "id": "08a7eec7-0f94-4f7a-92f2-55ffb1049335", - "value": "10.0.0.14" - } - ] -} diff --git a/api-ref/source/samples/cluster-check-request.json b/api-ref/source/samples/cluster-check-request.json deleted file mode 100644 index 6d831ea62..000000000 --- a/api-ref/source/samples/cluster-check-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "check": {} -} diff --git a/api-ref/source/samples/cluster-complete-lifecycle-request.json b/api-ref/source/samples/cluster-complete-lifecycle-request.json deleted file mode 100644 index f6c152300..000000000 --- a/api-ref/source/samples/cluster-complete-lifecycle-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "complete_lifecycle": { - "lifecycle_action_token": "ffbb9175-d510-4bc1-b676-c6aba2a4ca81" - } -} diff --git a/api-ref/source/samples/cluster-create-request.json b/api-ref/source/samples/cluster-create-request.json deleted file mode 100644 index 8f8ca16c4..000000000 --- a/api-ref/source/samples/cluster-create-request.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "cluster": { - "config": {}, - "desired_capacity": 0, - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "test_cluster", - "profile_id": "mystack", - "timeout": null - } -} diff --git a/api-ref/source/samples/cluster-create-response.json b/api-ref/source/samples/cluster-create-response.json deleted file mode 100644 index ddb424488..000000000 --- a/api-ref/source/samples/cluster-create-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "cluster": { - "config": {}, - "created_at": null, - "data": {}, - "dependents": {}, - "desired_capacity": 4, - "domain": null, - "id": "45edadcb-c73b-4920-87e1-518b2f29f54b", - "init_at": "2015-02-10T14:16:10", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "test_cluster", - "nodes": [], - "policies": [], - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - 
"profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "INIT", - "status_reason": "Initializing", - "timeout": 3600, - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/cluster-del-nodes-request.json b/api-ref/source/samples/cluster-del-nodes-request.json deleted file mode 100644 index ecc3ff930..000000000 --- a/api-ref/source/samples/cluster-del-nodes-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "del_nodes": { - "nodes": [ - "aff0135", - "e28a207" - ], - "destroy_after_deletion": false - } -} diff --git a/api-ref/source/samples/cluster-detach-policy-request.json b/api-ref/source/samples/cluster-detach-policy-request.json deleted file mode 100644 index 54b828817..000000000 --- a/api-ref/source/samples/cluster-detach-policy-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "policy_detach": { - "policy_id": "5630fb31" - } -} diff --git a/api-ref/source/samples/cluster-list-response.json b/api-ref/source/samples/cluster-list-response.json deleted file mode 100644 index 54ca5489d..000000000 --- a/api-ref/source/samples/cluster-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "clusters": [ - { - "created_at": "2016-05-11T07:29:04", - "data": {}, - "desired_capacity": 0, - "domain": null, - "id": "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", - "init_at": "2016-05-11T07:29:04", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "c0", - "nodes": [], - "policies": [], - "profile_id": "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", - "profile_name": "pcirros", - "project": "eee0b7c083e84501bdd50fb269d2a10e", - "status": "ACTIVE", - "status_reason": "Cluster creation succeeded.", - "timeout": 3600, - "updated_at": null, - "user": "ab79b9647d074e46ac223a8fa297b846" - } - ] -} diff --git a/api-ref/source/samples/cluster-operation-request.json b/api-ref/source/samples/cluster-operation-request.json deleted file mode 100644 index 9b3b20ac7..000000000 --- a/api-ref/source/samples/cluster-operation-request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "reboot": { - "filters": { - "role": "slave" - }, - "params": { - "type": "soft" - } - } -} diff --git a/api-ref/source/samples/cluster-policies-list-response.json b/api-ref/source/samples/cluster-policies-list-response.json deleted file mode 100644 index a34262104..000000000 --- a/api-ref/source/samples/cluster-policies-list-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "cluster_policies": [ - { - "cluster_id": "7d85f602-a948-4a30-afd4-e84f47471c15", - "cluster_name": "cluster4", - "enabled": true, - "id": "06be3a1f-b238-4a96-a737-ceec5714087e", - "policy_id": "714fe676-a08f-4196-b7af-61d52eeded15", - "policy_name": "dp01", - "policy_type": "senlin.policy.deletion-1.0" - }, - { - "cluster_id": "7d85f602-a948-4a30-afd4-e84f47471c15", - "cluster_name": "cluster4", - "enabled": true, - "id": "abddc45e-ac31-4f90-93cc-db55a7d8dd6d", - "policy_id": "e026e09f-a3e9-4dad-a1b9-d7ba316026a1", - "policy_name": "sp1", - "policy_type": "senlin.policy.scaling-1.0" - } - ] -} diff --git a/api-ref/source/samples/cluster-policy-show-response.json b/api-ref/source/samples/cluster-policy-show-response.json deleted file mode 100644 index caf05eb91..000000000 --- a/api-ref/source/samples/cluster-policy-show-response.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "cluster_policy": { - "cluster_id": "7d85f602-a948-4a30-afd4-e84f47471c15", - "cluster_name": "cluster4", - "enabled": true, - "id": "06be3a1f-b238-4a96-a737-ceec5714087e", - "policy_id": "714fe676-a08f-4196-b7af-61d52eeded15", - 
"policy_name": "dp01", - "policy_type": "senlin.policy.deletion-1.0" - } -} diff --git a/api-ref/source/samples/cluster-recover-request.json b/api-ref/source/samples/cluster-recover-request.json deleted file mode 100644 index 27d6e43a3..000000000 --- a/api-ref/source/samples/cluster-recover-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "recover": { - "operation": "reboot", - "operation_params": { - "type": "soft" - }, - "check": false - } -} diff --git a/api-ref/source/samples/cluster-replace-nodes-request.json b/api-ref/source/samples/cluster-replace-nodes-request.json deleted file mode 100644 index 5fba29167..000000000 --- a/api-ref/source/samples/cluster-replace-nodes-request.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "replace_nodes": { - "nodes": { - "node-1234": "node-5678" - } - } -} diff --git a/api-ref/source/samples/cluster-resize-request.json b/api-ref/source/samples/cluster-resize-request.json deleted file mode 100644 index 5bdf47e0c..000000000 --- a/api-ref/source/samples/cluster-resize-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "resize": { - "adjustment_type": "CHANGE_IN_CAPACITY", - "max_size": 5, - "min_size": 1, - "number": -2, - "strict": true - } -} diff --git a/api-ref/source/samples/cluster-scale-in-request.json b/api-ref/source/samples/cluster-scale-in-request.json deleted file mode 100644 index 561f3bf00..000000000 --- a/api-ref/source/samples/cluster-scale-in-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "scale_in": { - "count": 2 - } -} diff --git a/api-ref/source/samples/cluster-scale-out-request.json b/api-ref/source/samples/cluster-scale-out-request.json deleted file mode 100644 index b0c9b757e..000000000 --- a/api-ref/source/samples/cluster-scale-out-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "scale_out": { - "count": 2 - } -} diff --git a/api-ref/source/samples/cluster-show-response.json b/api-ref/source/samples/cluster-show-response.json deleted file mode 100644 index cdd056cf9..000000000 --- a/api-ref/source/samples/cluster-show-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "cluster": { - "config": {}, - "created_at": "2015-02-11T15:13:20Z", - "data": {}, - "dependents": {}, - "desired_capacity": 0, - "domain": null, - "id": "45edadcb-c73b-4920-87e1-518b2f29f54b", - "init_at": "2015-02-10T14:26:10", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "test_cluster", - "nodes": [], - "policies": [], - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "ACTIVE", - "status_reason": "Creation succeeded", - "timeout": 3600, - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/cluster-update-policy-request.json b/api-ref/source/samples/cluster-update-policy-request.json deleted file mode 100644 index 26b3b8e12..000000000 --- a/api-ref/source/samples/cluster-update-policy-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "update_policy": { - "policy_id": "dp01", - "enabled": false - } -} diff --git a/api-ref/source/samples/cluster-update-request.json b/api-ref/source/samples/cluster-update-request.json deleted file mode 100644 index 2baf4b02b..000000000 --- a/api-ref/source/samples/cluster-update-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "cluster": { - "metadata": null, - "name": null, - "profile_id": null, - "timeout": 30, - "profile_only": true - } -} diff --git a/api-ref/source/samples/cluster-update-response.json b/api-ref/source/samples/cluster-update-response.json deleted file mode 
100644 index f134242a5..000000000 --- a/api-ref/source/samples/cluster-update-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "cluster": { - "config": {}, - "created_at": "2015-02-11T15:13:20Z", - "data": {}, - "dependents": {}, - "desired_capacity": 0, - "domain": null, - "id": "45edadcb-c73b-4920-87e1-518b2f29f54b", - "init_at": "2015-02-10T14:26:10", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "test_cluster", - "nodes": [], - "policies": [], - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "UPDATING", - "status_reason": "Updating", - "timeout": 3600, - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/clusters-list-response.json b/api-ref/source/samples/clusters-list-response.json deleted file mode 100644 index fb733427a..000000000 --- a/api-ref/source/samples/clusters-list-response.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "clusters": [ - { - "config": {}, - "created_at": "2015-02-10T14:26:14Z", - "data": {}, - "dependents": {}, - "desired_capacity": 4, - "domain": null, - "id": "7d85f602-a948-4a30-afd4-e84f47471c15", - "init_at": "2015-02-10T14:26:11", - "max_size": -1, - "metadata": {}, - "min_size": 0, - "name": "cluster1", - "nodes": [ - "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", - "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", - "da1e9c87-e584-4626-a120-022da5062dac" - ], - "policies": [], - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "ACTIVE", - "status_reason": "Cluster scale-in succeeded", - "timeout": 3600, - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } - ] -} diff --git a/api-ref/source/samples/event-show-response.json b/api-ref/source/samples/event-show-response.json deleted file mode 100644 index 535cabea2..000000000 --- a/api-ref/source/samples/event-show-response.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "event": { - "action": "create", - "cluster": null, - "cluster_id": null, - "id": "2d255b9c-8f36-41a2-a137-c0175ccc29c3", - "level": "20", - "meta_data": {}, - "oid": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "oname": "node009", - "otype": "NODE", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "CREATING", - "status_reason": "Initializing", - "timestamp": "2015-03-05T08:53:15Z", - "user": "a21ded6060534d99840658a777c2af5a" - } -} diff --git a/api-ref/source/samples/events-list-response.json b/api-ref/source/samples/events-list-response.json deleted file mode 100644 index 5a2bdcfb8..000000000 --- a/api-ref/source/samples/events-list-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "events": [ - { - "action": "create", - "cluster": null, - "cluster_id": null, - "id": "2d255b9c-8f36-41a2-a137-c0175ccc29c3", - "level": "20", - "meta_data": {}, - "oid": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "oname": "node009", - "otype": "NODE", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "status": "CREATING", - "status_reason": "Initializing", - "timestamp": "2015-03-05T08:53:15Z", - "user": "a21ded6060534d99840658a777c2af5a" - } - ] -} diff --git a/api-ref/source/samples/node-action-response.json b/api-ref/source/samples/node-action-response.json deleted file mode 100644 index 458f57640..000000000 --- a/api-ref/source/samples/node-action-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "action": "7f760b61-7b15-4a50-af05-319922fa3229" -} \ No newline at end of file diff --git 
a/api-ref/source/samples/node-adopt-preview-request.json b/api-ref/source/samples/node-adopt-preview-request.json deleted file mode 100644 index 0ee9477a4..000000000 --- a/api-ref/source/samples/node-adopt-preview-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "identity": "65e27958-d6dc-4b0e-87bf-78e8f5536cbc", - "overrides": null, - "snapshot": true, - "type": "os.nova.server-1.0" -} diff --git a/api-ref/source/samples/node-adopt-preview-response.json b/api-ref/source/samples/node-adopt-preview-response.json deleted file mode 100644 index d35054712..000000000 --- a/api-ref/source/samples/node-adopt-preview-response.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "node_preview": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - } -} diff --git a/api-ref/source/samples/node-adopt-request.json b/api-ref/source/samples/node-adopt-request.json deleted file mode 100644 index 12050a700..000000000 --- a/api-ref/source/samples/node-adopt-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "identity": "65e27958-d6dc-4b0e-87bf-78e8f5536cbc", - "metadata": {}, - "name": "node009", - "overrides": null, - "role": "master", - "snapshot": true, - "type": "os.nova.server-1.0" -} diff --git a/api-ref/source/samples/node-adopt-response.json b/api-ref/source/samples/node-adopt-response.json deleted file mode 100644 index 696d5558a..000000000 --- a/api-ref/source/samples/node-adopt-response.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "node": { - "cluster_id": null, - "created_at": null, - "data": {}, - "domain": null, - "id": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "index": -1, - "init_at": "2015-03-05T08:53:15Z", - "metadata": {}, - "name": "node009", - "physical_id": "65e27958-d6dc-4b0e-87bf-78e8f5536cbc", - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "prof-node009", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "role": "master", - "status": "ACTIVE", - "status_reason": "Node adopted successfully", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/node-check-request.json b/api-ref/source/samples/node-check-request.json deleted file mode 100644 index 6d831ea62..000000000 --- a/api-ref/source/samples/node-check-request.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "check": {} -} diff --git a/api-ref/source/samples/node-create-request.json b/api-ref/source/samples/node-create-request.json deleted file mode 100644 index 55e5c3311..000000000 --- a/api-ref/source/samples/node-create-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "node": { - "cluster_id": null, - "metadata": {}, - "name": "node009", - "profile_id": "mystack", - "role": "master" - } -} diff --git a/api-ref/source/samples/node-create-response.json b/api-ref/source/samples/node-create-response.json deleted file mode 100644 index 5944459c7..000000000 --- a/api-ref/source/samples/node-create-response.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "node": { - "cluster_id": null, - "created_at": null, - "data": {}, - "dependents": {}, - "domain": null, - "id": "0df0931b-e251-4f2e-8719-4ebfda3627ba", - "index": -1, - "init_at": "2015-03-05T08:53:15Z", - "metadata": {}, - "name": "node009", - "physical_id": "", - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "role": "master", - "status": "INIT", - 
"status_reason": "Initializing", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/node-list-response.json b/api-ref/source/samples/node-list-response.json deleted file mode 100644 index 9cb82d8ea..000000000 --- a/api-ref/source/samples/node-list-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "nodes": [ - { - "cluster_id": "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", - "created_at": "2016-05-13T07:02:20Z", - "data": {}, - "dependents": {}, - "domain": null, - "id": "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", - "index": 2, - "init_at": "2016-05-13T07:02:04Z", - "metadata": {}, - "name": "node-e395be1e-002", - "physical_id": "66a81d68-bf48-4af5-897b-a3bfef7279a8", - "profile_id": "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", - "profile_name": "pcirros", - "project_id": "eee0b7c083e84501bdd50fb269d2a10e", - "role": "", - "status": "ACTIVE", - "status_reason": "Creation succeeded", - "updated_at": null, - "user": "ab79b9647d074e46ac223a8fa297b846" - } - ] -} diff --git a/api-ref/source/samples/node-operation-request.json b/api-ref/source/samples/node-operation-request.json deleted file mode 100644 index 86d866af3..000000000 --- a/api-ref/source/samples/node-operation-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "reboot": { - "type": "SOFT" - } -} diff --git a/api-ref/source/samples/node-recover-request.json b/api-ref/source/samples/node-recover-request.json deleted file mode 100644 index 27d6e43a3..000000000 --- a/api-ref/source/samples/node-recover-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "recover": { - "operation": "reboot", - "operation_params": { - "type": "soft" - }, - "check": false - } -} diff --git a/api-ref/source/samples/node-show-response.json b/api-ref/source/samples/node-show-response.json deleted file mode 100644 index b513ce6cc..000000000 --- a/api-ref/source/samples/node-show-response.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "node": { - "cluster_id": null, - "created_at": "2015-02-10T12:03:16Z", - "data": {}, - "dependents": {}, - "domain": null, - "id": "d5779bb0-f0a0-49c9-88cc-6f078adb5a0b", - "index": -1, - "init_at": "2015-02-10T12:03:13", - "metadata": {}, - "name": "node1", - "physical_id": "f41537fa-22ab-4bea-94c0-c874e19d0c80", - "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", - "profile_name": "mystack", - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "role": null, - "status": "ACTIVE", - "status_reason": "Creation succeeded", - "updated_at": "2015-03-04T04:58:27Z", - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/node-update-request.json b/api-ref/source/samples/node-update-request.json deleted file mode 100644 index b593bb7f8..000000000 --- a/api-ref/source/samples/node-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "node": { - "name": "new_node_name" - } -} diff --git a/api-ref/source/samples/policy-create-request.json b/api-ref/source/samples/policy-create-request.json deleted file mode 100644 index d610eedec..000000000 --- a/api-ref/source/samples/policy-create-request.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "policy": { - "name": "sp001", - "spec": { - "properties": { - "adjustment": { - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - } - } -} diff --git a/api-ref/source/samples/policy-create-response.json b/api-ref/source/samples/policy-create-response.json deleted file mode 100644 index 4d9f6fcaf..000000000 --- 
a/api-ref/source/samples/policy-create-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "policy": { - "created_at": "2015-03-02T07:40:31", - "data": {}, - "domain": null, - "id": "02f62195-2198-4797-b0a9-877632208527", - "name": "sp001", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "adjustment": { - "best_effort": true, - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - }, - "type": "senlin.policy.scaling-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/policy-list-response.json b/api-ref/source/samples/policy-list-response.json deleted file mode 100644 index 6e87c214d..000000000 --- a/api-ref/source/samples/policy-list-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "policies": [ - { - "created_at": "2015-02-15T08:33:13.000000", - "data": {}, - "domain": null, - "id": "7192d8df-73be-4e98-ab99-1cf6d5066729", - "name": "test_policy_1", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "description": "A test policy", - "properties": { - "criteria": "OLDEST_FIRST", - "destroy_after_deletion": true, - "grace_period": 60, - "reduce_desired_capacity": false - }, - "type": "senlin.policy.deletion", - "version": "1.0" - }, - "type": "senlin.policy.deletion-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } - ] -} diff --git a/api-ref/source/samples/policy-show-response.json b/api-ref/source/samples/policy-show-response.json deleted file mode 100644 index 4d9f6fcaf..000000000 --- a/api-ref/source/samples/policy-show-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "policy": { - "created_at": "2015-03-02T07:40:31", - "data": {}, - "domain": null, - "id": "02f62195-2198-4797-b0a9-877632208527", - "name": "sp001", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "adjustment": { - "best_effort": true, - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - }, - "type": "senlin.policy.scaling-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/policy-type-show-response-v1.5.json b/api-ref/source/samples/policy-type-show-response-v1.5.json deleted file mode 100644 index 8b9563eef..000000000 --- a/api-ref/source/samples/policy-type-show-response-v1.5.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "policy_type": { - "name": "senlin.policy.affinity-1.0", - "schema": { - "availability_zone": { - "description": "Name of the availability zone to place the nodes.", - "required": false, - "type": "String", - "updatable": false - }, - "enable_drs_extension": { - "default": false, - "description": "Enable vSphere DRS extension.", - "required": false, - "type": "Boolean", - "updatable": false - }, - "servergroup": { - "description": "Properties of the VM server group", - "required": false, - "schema": { - "name": { - "description": "The name of the server group", - "required": false, - "type": "String", - "updatable": false - }, - "policies": { - "constraints": [ - { - "constraint": [ - "affinity", - "anti-affinity" - ], - "type": "AllowedValues" - } - ], - "default": "anti-affinity", - "description": "The server group policies.", - "required": false, - "type": "String", - "updatable": false - } - }, - "type": "Map", - "updatable": false - } - }, - "support_status": { - "1.0": 
[ - { - "status": "SUPPORTED", - "since": "2016.10" - } - ] - } - } -} diff --git a/api-ref/source/samples/policy-type-show-response.json b/api-ref/source/samples/policy-type-show-response.json deleted file mode 100644 index bec3e6152..000000000 --- a/api-ref/source/samples/policy-type-show-response.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "policy_type": { - "name": "senlin.policy.affinity-1.0", - "schema": { - "availability_zone": { - "description": "Name of the availability zone to place the nodes.", - "required": false, - "type": "String", - "updatable": false - }, - "enable_drs_extension": { - "default": false, - "description": "Enable vSphere DRS extension.", - "required": false, - "type": "Boolean", - "updatable": false - }, - "servergroup": { - "description": "Properties of the VM server group", - "required": false, - "schema": { - "name": { - "description": "The name of the server group", - "required": false, - "type": "String", - "updatable": false - }, - "policies": { - "constraints": [ - { - "constraint": [ - "affinity", - "anti-affinity" - ], - "type": "AllowedValues" - } - ], - "default": "anti-affinity", - "description": "The server group policies.", - "required": false, - "type": "String", - "updatable": false - } - }, - "type": "Map", - "updatable": false - } - } - } -} diff --git a/api-ref/source/samples/policy-types-list-response-v1.5.json b/api-ref/source/samples/policy-types-list-response-v1.5.json deleted file mode 100644 index 879e688ec..000000000 --- a/api-ref/source/samples/policy-types-list-response-v1.5.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "policy_types": [ - { - "name": "senlin.policy.affinity", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.10" - } - ] - } - }, - { - "name": "senlin.policy.health", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "EXPERIMENTAL", - "since": "2016.10" - } - ] - } - }, - { - "name": "senlin.policy.scaling", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - } - }, - { - "name": "senlin.policy.region_placement", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "EXPERIMENTAL", - "since": "2016.04" - }, - { - "status": "SUPPORTED", - "since": "2016.10" - } - ] - } - } - ] -} diff --git a/api-ref/source/samples/policy-types-list-response.json b/api-ref/source/samples/policy-types-list-response.json deleted file mode 100644 index 6914ce935..000000000 --- a/api-ref/source/samples/policy-types-list-response.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "policy_types": [ - { - "name": "senlin.policy.affinity-1.0" - }, - { - "name": "senlin.policy.batch-1.0" - }, - { - "name": "senlin.policy.health-1.0" - }, - { - "name": "senlin.policy.scaling-1.0" - }, - { - "name": "senlin.policy.region_placement-1.0" - }, - { - "name": "senlin.policy.deletion-1.0" - }, - { - "name": "senlin.policy.loadbalance-1.1" - }, - { - "name": "senlin.policy.zone_placement-1.0" - } - ] -} diff --git a/api-ref/source/samples/policy-update-request.json b/api-ref/source/samples/policy-update-request.json deleted file mode 100644 index c1c489a2a..000000000 --- a/api-ref/source/samples/policy-update-request.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "policy": { - "name": "new_name" - } -} diff --git a/api-ref/source/samples/policy-update-response.json b/api-ref/source/samples/policy-update-response.json deleted file mode 100644 index 98f53d70b..000000000 --- a/api-ref/source/samples/policy-update-response.json +++ 
/dev/null @@ -1,24 +0,0 @@ -{ - "policy": { - "created_at": "2015-10-14T09:14:53", - "data": {}, - "domain": null, - "id": "ac5415bd-f522-4160-8be0-f8853e4bc332", - "name": "dp01", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "description": "A policy for node deletion.", - "properties": { - "criteria": "OLDEST_FIRST", - "destroy_after_deletion": true, - "grace_period": 60, - "reduce_desired_capacity": false - }, - "type": "senlin.policy.deletion", - "version": "1.0" - }, - "type": "senlin.policy.deletion-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/policy-validate-request.json b/api-ref/source/samples/policy-validate-request.json deleted file mode 100644 index bc17f02bd..000000000 --- a/api-ref/source/samples/policy-validate-request.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "policy": { - "spec": { - "properties": { - "adjustment": { - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - } - } -} diff --git a/api-ref/source/samples/policy-validate-response.json b/api-ref/source/samples/policy-validate-response.json deleted file mode 100644 index f888649b5..000000000 --- a/api-ref/source/samples/policy-validate-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "policy": { - "created_at": null, - "data": {}, - "domain": null, - "id": null, - "name": "validated_policy", - "project": "1d567ed4ef51453a85545f018b68c26d", - "spec": { - "properties": { - "adjustment": { - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - }, - "type": "senlin.policy.scaling-1.0", - "updated_at": null, - "user": "990e4c1f4a414f74990b17d16f2540b5" - } -} diff --git a/api-ref/source/samples/profile-create-request.json b/api-ref/source/samples/profile-create-request.json deleted file mode 100644 index 705790b33..000000000 --- a/api-ref/source/samples/profile-create-request.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "profile": { - "metadata": {}, - "name": "test-profile", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - } - } -} diff --git a/api-ref/source/samples/profile-create-response.json b/api-ref/source/samples/profile-create-response.json deleted file mode 100644 index 527207ccf..000000000 --- a/api-ref/source/samples/profile-create-response.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "profile": { - "created_at": "2016-05-05T10:15:22Z", - "domain": null, - "id": "1d85fc39-7d9a-4f64-9751-b127ef554923", - "metadata": {}, - "name": "test-profile", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/profile-list-response.json b/api-ref/source/samples/profile-list-response.json deleted file mode 100644 index e302c880d..000000000 --- a/api-ref/source/samples/profile-list-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - 
"profiles": [ - { - "created_at": "2016-01-03T16:22:23Z", - "domain": null, - "id": "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", - "metadata": {}, - "name": "pserver", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "flavor": 1, - "image": "cirros-0.3.4-x86_64-uec", - "key_name": "oskey", - "name": "cirros_server", - "networks": [ - { - "network": "private" - } - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } - ] -} diff --git a/api-ref/source/samples/profile-show-response.json b/api-ref/source/samples/profile-show-response.json deleted file mode 100644 index eb6e185d4..000000000 --- a/api-ref/source/samples/profile-show-response.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "profile": { - "created_at": "2016-03-10T06:34:56Z", - "domain": null, - "id": "17151d8a-f46f-4541-bde0-db3b207c20d2", - "metadata": {}, - "name": "PF20", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": null, - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/profile-type-ops-response.json b/api-ref/source/samples/profile-type-ops-response.json deleted file mode 100644 index 86846e0ed..000000000 --- a/api-ref/source/samples/profile-type-ops-response.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "operations": { - "reboot": { - "description": "Reboot the nova server.", - "parameters": { - "type": { - "constraints": [ - { - "constraint": [ - "SOFT", - "HARD" - ], - "type": "AllowedValues" - } - ], - "default": "SOFT", - "description": "Type of reboot which can be 'SOFT' or 'HARD'.", - "required": false, - "type": "String" - } - } - } - } -} diff --git a/api-ref/source/samples/profile-type-show-response-v1.5.json b/api-ref/source/samples/profile-type-show-response-v1.5.json deleted file mode 100644 index 720b7e62c..000000000 --- a/api-ref/source/samples/profile-type-show-response-v1.5.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "profile_type": { - "name": "os.heat.stack-1.0", - "schema": { - "context": { - "default": {}, - "description": "A dictionary for specifying the customized context for stack operations", - "required": false, - "type": "Map", - "updatable": false - }, - "disable_rollback": { - "default": true, - "description": "A boolean specifying whether a stack operation can be rolled back.", - "required": false, - "type": "Boolean", - "updatable": true - }, - "environment": { - "default": {}, - "description": "A map that specifies the environment used for stack operations.", - "required": false, - "type": "Map", - "updatable": true - }, - "files": { - "default": {}, - "description": "Contents of files referenced by the template, if any.", - "required": false, - "type": "Map", - "updatable": true - }, - "parameters": { - "default": {}, - "description": "Parameters to be passed to Heat for stack operations.", - "required": false, - "type": "Map", - "updatable": true - }, - "template": { - "default": {}, - "description": "Heat stack template.", - "required": false, - "type": "Map", - "updatable": true - }, - "template_url": { - "default": "", - "description": "Heat stack template url.", - "required": false, - "type": "String", - "updatable": true - }, - "timeout": { - "description": "A 
integer that specifies the number of minutes that a stack operation times out.", - "required": false, - "type": "Integer", - "updatable": true - } - }, - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - } - } -} diff --git a/api-ref/source/samples/profile-type-show-response.json b/api-ref/source/samples/profile-type-show-response.json deleted file mode 100644 index 90d2d514b..000000000 --- a/api-ref/source/samples/profile-type-show-response.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "profile_type": { - "name": "os.heat.stack-1.0", - "schema": { - "context": { - "default": {}, - "description": "A dictionary for specifying the customized context for stack operations", - "required": false, - "type": "Map", - "updatable": false - }, - "disable_rollback": { - "default": true, - "description": "A boolean specifying whether a stack operation can be rolled back.", - "required": false, - "type": "Boolean", - "updatable": true - }, - "environment": { - "default": {}, - "description": "A map that specifies the environment used for stack operations.", - "required": false, - "type": "Map", - "updatable": true - }, - "files": { - "default": {}, - "description": "Contents of files referenced by the template, if any.", - "required": false, - "type": "Map", - "updatable": true - }, - "parameters": { - "default": {}, - "description": "Parameters to be passed to Heat for stack operations.", - "required": false, - "type": "Map", - "updatable": true - }, - "template": { - "default": {}, - "description": "Heat stack template.", - "required": false, - "type": "Map", - "updatable": true - }, - "template_url": { - "default": "", - "description": "Heat stack template url.", - "required": false, - "type": "String", - "updatable": true - }, - "timeout": { - "description": "A integer that specifies the number of minutes that a stack operation times out.", - "required": false, - "type": "Integer", - "updatable": true - } - } - } -} diff --git a/api-ref/source/samples/profile-types-list-response-v1.5.json b/api-ref/source/samples/profile-types-list-response-v1.5.json deleted file mode 100644 index 5d51596f5..000000000 --- a/api-ref/source/samples/profile-types-list-response-v1.5.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "profile_types": [ - { - "name": "container.dockerinc.docker", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "EXPERIMENTAL", - "since": "2017.02" - } - ] - } - }, - { - "name": "os.heat.stack", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - } - }, - { - "name": "os.nova.server", - "version": "1.0", - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - } - } - ] -} diff --git a/api-ref/source/samples/profile-types-list-response.json b/api-ref/source/samples/profile-types-list-response.json deleted file mode 100644 index fcf5cf743..000000000 --- a/api-ref/source/samples/profile-types-list-response.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "profile_types": [ - { - "name": "container.dockerinc.docker-1.0" - }, - { - "name": "os.heat.stack-1.0" - }, - { - "name": "os.nova.server-1.0" - } - ] -} diff --git a/api-ref/source/samples/profile-update-request.json b/api-ref/source/samples/profile-update-request.json deleted file mode 100644 index 5ae1fb158..000000000 --- a/api-ref/source/samples/profile-update-request.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "profile": { - "metadata": {"key": "value"}, - "name": "new-name" - } -} diff --git 
a/api-ref/source/samples/profile-update-response.json b/api-ref/source/samples/profile-update-response.json deleted file mode 100644 index 17a5e1502..000000000 --- a/api-ref/source/samples/profile-update-response.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "profile": { - "created_at": "2016-03-10T06:34:56Z", - "domain": null, - "id": "17151d8a-f46f-4541-bde0-db3b207c20d2", - "metadata": { - "key": "value" - }, - "name": "new-name", - "project": "42d9e9663331431f97b75e25136307ff", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": "2016-03-11T05:10:11Z", - "user": "5e5bf8027826429c96af157f68dc9072" - } -} diff --git a/api-ref/source/samples/profile-validate-request.json b/api-ref/source/samples/profile-validate-request.json deleted file mode 100644 index f99d9f7b2..000000000 --- a/api-ref/source/samples/profile-validate-request.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "profile": { - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - } - } -} diff --git a/api-ref/source/samples/profile-validate-response.json b/api-ref/source/samples/profile-validate-response.json deleted file mode 100644 index 32042ab06..000000000 --- a/api-ref/source/samples/profile-validate-response.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "profile": { - "created_at": null, - "domain": null, - "id": null, - "metadata": null, - "name": "validated_profile", - "project": "1d567ed4ef51453a85545f018b68c26d", - "spec": { - "properties": { - "flavor": "m1.small", - "image": "F20", - "key_name": "oskey", - "name": "F20_server", - "networks": [ - { - "network": "private" - } - ], - "security_groups": [ - "default" - ] - }, - "type": "os.nova.server", - "version": 1.0 - }, - "type": "os.nova.server-1.0", - "updated_at": null, - "user": "990e4c1f4a414f74990b17d16f2540b5" - } -} diff --git a/api-ref/source/samples/receiver-create-request.json b/api-ref/source/samples/receiver-create-request.json deleted file mode 100644 index c78b95b9c..000000000 --- a/api-ref/source/samples/receiver-create-request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "receiver": { - "action": "CLUSTER_SCALE_OUT", - "cluster_id": "cf99d754-3cdc-47f4-8a29-cd14f02f5436", - "name": "cluster_inflate", - "params": { - "count": "1" - }, - "type": "webhook" - } -} diff --git a/api-ref/source/samples/receiver-create-response.json b/api-ref/source/samples/receiver-create-response.json deleted file mode 100644 index 8c5747908..000000000 --- a/api-ref/source/samples/receiver-create-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "receiver": { - "action": "CLUSTER_SCALE_OUT", - "actor": { - "trust_id": [ - "6dc6d336e3fc4c0a951b5698cd1236d9" - ] - }, - "channel": { - "alarm_url": "http://node1:8777/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=1" - }, - "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", - "created_at": "2015-06-27T05:09:43", - "domain": "Default", - "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", - "name": "cluster_inflate", - "params": { - "count": "1" - }, - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "type": "webhook", - "updated_at": null, - "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" - } -} diff --git 
a/api-ref/source/samples/receiver-show-response.json b/api-ref/source/samples/receiver-show-response.json deleted file mode 100644 index 8c5747908..000000000 --- a/api-ref/source/samples/receiver-show-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "receiver": { - "action": "CLUSTER_SCALE_OUT", - "actor": { - "trust_id": [ - "6dc6d336e3fc4c0a951b5698cd1236d9" - ] - }, - "channel": { - "alarm_url": "http://node1:8777/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=1" - }, - "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", - "created_at": "2015-06-27T05:09:43", - "domain": "Default", - "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", - "name": "cluster_inflate", - "params": { - "count": "1" - }, - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "type": "webhook", - "updated_at": null, - "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" - } -} diff --git a/api-ref/source/samples/receiver-update-request.json b/api-ref/source/samples/receiver-update-request.json deleted file mode 100644 index 54fcb76ea..000000000 --- a/api-ref/source/samples/receiver-update-request.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "receiver": { - "name": "new-name", - "action": "CLUSTER_SCALE_OUT", - "params": { - "count": "2" - } - } -} diff --git a/api-ref/source/samples/receiver-update-response.json b/api-ref/source/samples/receiver-update-response.json deleted file mode 100644 index 1851e68b5..000000000 --- a/api-ref/source/samples/receiver-update-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "receiver": { - "action": "CLUSTER_SCALE_OUT", - "actor": { - "trust_id": [ - "6dc6d336e3fc4c0a951b5698cd1236d9" - ] - }, - "channel": { - "alarm_url": "http://node1:8777/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=2" - }, - "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", - "created_at": "2015-06-27T05:09:43", - "domain": "Default", - "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", - "name": "new-name", - "params": { - "count": "2" - }, - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "type": "webhook", - "updated_at": "2016-03-11T05:10:11", - "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" - } -} diff --git a/api-ref/source/samples/receivers-list-response.json b/api-ref/source/samples/receivers-list-response.json deleted file mode 100644 index d65547c4c..000000000 --- a/api-ref/source/samples/receivers-list-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "receivers": [ - { - "action": "CLUSTER_SCALE_OUT", - "actor": { - "trust_id": [ - "6dc6d336e3fc4c0a951b5698cd1236d9" - ] - }, - "channel": { - "alarm_url": "http://node1:8777/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=1" - }, - "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", - "created_at": "2015-06-27T05:09:43", - "domain": "Default", - "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", - "name": "cluster_inflate", - "params": { - "count": "1" - }, - "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", - "type": "webhook", - "updated_at": null, - "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" - } - ] -} diff --git a/api-ref/source/samples/services-list-response.json b/api-ref/source/samples/services-list-response.json deleted file mode 100644 index b501fdccf..000000000 --- a/api-ref/source/samples/services-list-response.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "services": [ - { - "binary": "senlin-engine", - "disabled_reason": null, - "host": "host1", - "id": "f93f83f6-762b-41b6-b757-80507834d394", - "state": "up", - "status": "enabled", - "topic": "senlin-engine", - "updated_at": "2017-04-24T07:43:12" - } - ] -} 
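The ``channel.alarm_url`` returned in the receiver samples above can be POSTed to directly to trigger the associated action; this is the webhook endpoint documented further below. A minimal sketch, reusing the sample URL purely as an illustration:

```python
# Illustrative sketch only: trigger a receiver via its alarm_url, which is
# copied here from the sample responses above. No X-Auth-Token is sent; a
# webhook receiver is designed to be triggered anonymously and acts through
# the trust recorded in its 'actor' field.
import requests

alarm_url = ("http://node1:8777/v1/webhooks/"
             "e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=2&count=1")

resp = requests.post(alarm_url)
resp.raise_for_status()  # a 202 Accepted is expected
print(resp.json()["action"])  # UUID of the queued CLUSTER_SCALE_OUT action
```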
diff --git a/api-ref/source/samples/version-show-response.json b/api-ref/source/samples/version-show-response.json deleted file mode 100644 index 8fa260569..000000000 --- a/api-ref/source/samples/version-show-response.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "version": { - "id": "1.0", - "links": [ - { - "href": "/v1/", - "rel": "self" - }, - { - "href": "https://docs.openstack.org/api-ref/clustering", - "rel": "help" - } - ], - "max_version": "1.7", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.clustering-v1+json" - } - ], - "min_version": "1.0", - "status": "CURRENT", - "updated": "2016-01-18T00:00:00Z" - } -} diff --git a/api-ref/source/samples/versions-list-response.json b/api-ref/source/samples/versions-list-response.json deleted file mode 100644 index 12a4ca7bb..000000000 --- a/api-ref/source/samples/versions-list-response.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "versions": [ - { - "id": "1.0", - "links": [ - { - "href": "/v1/", - "rel": "self" - }, - { - "href": "https://docs.openstack.org/api-ref/clustering", - "rel": "help" - } - ], - "max_version": "1.7", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.clustering-v1+json" - } - ], - "min_version": "1.0", - "status": "CURRENT", - "updated": "2016-01-18T00:00:00Z" - } - ] -} diff --git a/api-ref/source/samples/webhook-action-response.json b/api-ref/source/samples/webhook-action-response.json deleted file mode 100644 index 5fb7bfa86..000000000 --- a/api-ref/source/samples/webhook-action-response.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "action": "290c44fa-c60f-4d75-a0eb-87433ba982a3" -} \ No newline at end of file diff --git a/api-ref/source/services.inc b/api-ref/source/services.inc deleted file mode 100644 index 2c338e18a..000000000 --- a/api-ref/source/services.inc +++ /dev/null @@ -1,60 +0,0 @@ -=================== -Services (services) -=================== - -Lists all services for the senlin engine. - - -List services -=================== - -.. rest_method:: GET /v1/services - - min_version: 1.7 - -This API is only available since API microversion 1.7. - -Lists all services. - -Response codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 400 - - 401 - - 403 - - 503 - -Request Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - services: services - - binary: binary - - disabled_reason: disabled_reason - - host: host - - id: service_id - - state: service_state - - status: service_status - - topic: topic - - updated_at: updated_at - -Response Example ----------------- - -.. literalinclude:: samples/services-list-response.json - :language: javascript
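For illustration, a minimal sketch of calling this endpoint; the ``OpenStack-API-Version`` header opts in to microversion 1.7, and the endpoint and token are placeholder assumptions:

```python
# Illustrative sketch only: list the senlin engine services.
# Endpoint/port and token are placeholders.
import requests

SENLIN_ENDPOINT = "http://controller:8778/v1"  # assumed senlin-api endpoint
TOKEN = "<keystone-token>"

resp = requests.get(
    SENLIN_ENDPOINT + "/services",
    headers={
        "X-Auth-Token": TOKEN,
        # The service type is "clustering"; 1.7 is the minimum for this call.
        "OpenStack-API-Version": "clustering 1.7",
    },
)
resp.raise_for_status()
for svc in resp.json()["services"]:
    print(svc["binary"], svc["host"], svc["state"], svc["status"])
```

diff --git a/api-ref/source/status.yaml b/api-ref/source/status.yaml deleted file mode 100644 index 21a08799f..000000000 --- a/api-ref/source/status.yaml +++ /dev/null @@ -1,61 +0,0 @@ -################# -# Success Codes # -################# -200: - default: | - Request was successful. -201: - default: | - Resource was created and is ready to use. -202: - default: | - Request was accepted for processing, but the processing has not been - completed. A 'location' header is included in the response which contains - a link to check the progress of the request. -204: - default: | - The server has fulfilled the request by deleting the resource.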
-300: - default: | - There are multiple choices for resources. The request has to be more - specific to successfully retrieve one of these resources. - multi_version: | - There is more than one API version to choose from. The client has to be more - specific to request a service endpoint. - -################# -# Error Codes # -################# - -400: - default: | - Some content in the request was invalid. -401: - default: | - User must authenticate before making a request. -403: - default: | - Policy does not allow the current user to do this operation. -404: - default: | - The requested resource could not be found. -405: - default: | - Method is not valid for this endpoint. -406: - default: | - The requested API version is not supported by the API. -409: - default: | - This operation conflicted with another operation on this resource. - duplicate_zone: | - There is already a zone with this name. -500: - default: | - Something went wrong inside the service. This usually should not happen. - If it does happen, it means the server has experienced some serious - problems. -503: - default: | - Service is not available. This is mostly caused by service configuration - errors which prevent the service from starting up successfully. diff --git a/api-ref/source/versions.inc b/api-ref/source/versions.inc deleted file mode 100644 index 552a03651..000000000 --- a/api-ref/source/versions.inc +++ /dev/null @@ -1,101 +0,0 @@ -============ -API Versions -============ - -Concepts -======== - -The Senlin API supports ''major versions'' expressed in request URLs and -''microversions'' which can be sent in the HTTP header ``OpenStack-API-Version``. - -When the specified ``OpenStack-API-Version`` is not supported by the API -service, a 406 (NotAcceptable) exception will be raised. Note that this applies -to all API requests documented in this guide. - -List Major Versions -=================== - -.. rest_method:: GET / - -Lists information for all Clustering API major versions. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 300: multi_version - -.. rest_status_code:: error status.yaml - - - 503 - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - versions: versions - - id: version_id - - links: version_links - - max_version: version_max_version - - media-types: version_media_types - - min_version: version_min_version - - status: version_status - - updated: version_updated - -Response Example ---------------- - -.. literalinclude:: samples/versions-list-response.json - :language: javascript - - -Show Details of an API Version -============================== - -.. rest_method:: GET /{version}/ - -Show details about an API major version. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 200 - -.. rest_status_code:: error status.yaml - - - 404 - - 406 - - 503 - -Request Parameters -------------------- - -.. rest_parameters:: parameters.yaml - - - version: version_url - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-ID: request_id - - version: version - - id: version_id - - links: version_links - - max_version: version_max_version - - media-types: version_media_types - - min_version: version_min_version - - status: version_status - - updated: version_updated - -Response Example ---------------- - -..
literalinclude:: samples/version-show-response.json - :language: javascript diff --git a/api-ref/source/webhooks.inc b/api-ref/source/webhooks.inc deleted file mode 100644 index 06c0242a2..000000000 --- a/api-ref/source/webhooks.inc +++ /dev/null @@ -1,55 +0,0 @@ -=================== -Webhooks (webhooks) -=================== - -Triggers an action represented by a webhook. For API microversion less than -1.10, optional params in the query are sent as inputs to be used by the -targeted action. For API microversion equal to or greater than 1.10, any -key-value pairs in the request body are sent as inputs to be used by the -targeted action. - -Trigger webhook action -====================== - -.. rest_method:: POST /v1/webhooks/{webhook_id}/trigger - -Triggers a webhook receiver. - -Response Codes -------------- - -.. rest_status_code:: success status.yaml - - - 202 - -.. rest_status_code:: error status.yaml - - - 400 - - 403 - - 404 - - 503 - -Request Parameters ------------------ - -.. rest_parameters:: parameters.yaml - - - OpenStack-API-Version: microversion - - webhook_id: webhook_id_url - - V: webhook_version - - params: webhook_params - -Response Parameters ------------------- - -.. rest_parameters:: parameters.yaml - - - X-OpenStack-Request-Id: request_id - - Location: location - - action: action_action - -Response Example ---------------- - -.. literalinclude:: samples/webhook-action-response.json - :language: javascript diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 5774e51c1..000000000 --- a/bindep.txt +++ /dev/null @@ -1,2 +0,0 @@ -graphviz [!platform:gentoo] -media-gfx/graphviz [platform:gentoo] diff --git a/contrib/kubernetes/README.rst b/contrib/kubernetes/README.rst deleted file mode 100644 index 1e40c58d9..000000000 --- a/contrib/kubernetes/README.rst +++ /dev/null @@ -1,99 +0,0 @@ -Kubernetes Profile -================== - -Installation ------------- - -:: - - pip install --editable . - - -Usage ------ - -Prepare a profile for master nodes -.................................. - -Copy the example profile file `kubemaster.yaml` from examples/kubemaster.yaml, -modify related parameters based on your OpenStack environment. -For now, only the official Ubuntu 16.04 cloud image is supported. - -:: - - openstack cluster profile create --spec-file kubemaster.yaml profile-master - -Create a cluster for master nodes -................................. - -For now, please create exactly one node in this cluster. This profile does not -support a multi-master high-availability installation. - -:: - - openstack cluster create --min-size 1 --desired-capacity 1 --max-size 1 --profile profile-master cm - - -Prepare a profile for worker nodes -.................................. - -Copy the example profile file `kubenode.yaml`, modify related parameters, and -change `master_cluster` to the senlin cluster you just created. - -:: - - openstack cluster profile create --spec-file kubenode.yaml profile-node - - -Create a cluster for worker nodes -................................. - -:: - - openstack cluster create --desired-capacity 2 --profile profile-node cn - - - -Operate Kubernetes ------------------- - -About kubeconfig -................ - -The config file for `kubectl` is located at `/root/.kube/config` on the -master node. Copy this file out and place it at `$HOME/.kube/config`. -Change the server IP in it to the master node's floating IP. Run -`kubectl get nodes` and see if it works. - -Dashboard -.........
- -Prepare the following file to skip dashboard authentication:: - - $ cat ./dashboard-admin.yaml - apiVersion: rbac.authorization.k8s.io/v1beta1 - kind: ClusterRoleBinding - metadata: - name: kubernetes-dashboard - labels: - k8s-app: kubernetes-dashboard - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: kubernetes-dashboard - namespace: kube-system - -Apply this config:: - - kubectl apply -f ./dashboard-admin.yaml - -Start a proxy using `kubectl`:: - - kubectl proxy - -Open the dashboard in a browser at -`http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/`, -and skip the login process. diff --git a/contrib/kubernetes/TODO.rst b/contrib/kubernetes/TODO.rst deleted file mode 100644 index 97a11efdc..000000000 --- a/contrib/kubernetes/TODO.rst +++ /dev/null @@ -1,13 +0,0 @@ -TODO: -- Forbid deleting master cluster before deleting node cluster. -- Limit to no more than 1 node in master cluster. -- Drain node before deleting worker node. -- More validation before cluster creation. -- More exception catchers in code. - -Done: - -- Add ability to do actions on cluster creation/deletion. -- Add more network interfaces in drivers. -- Add kubernetes master profile, use kubeadm to setup one master node. -- Add kubernetes node profile, auto retrieve kubernetes data from master cluster. diff --git a/contrib/kubernetes/examples/kubemaster.yaml b/contrib/kubernetes/examples/kubemaster.yaml deleted file mode 100644 index c28c10570..000000000 --- a/contrib/kubernetes/examples/kubemaster.yaml +++ /dev/null @@ -1,7 +0,0 @@ -type: senlin.kubernetes.master -version: 1.0 -properties: - flavor: k8s.master - image: ubuntu-16.04 - key_name: elynn - public_network: public diff --git a/contrib/kubernetes/examples/kubenode.yaml b/contrib/kubernetes/examples/kubenode.yaml deleted file mode 100644 index 59a9b55db..000000000 --- a/contrib/kubernetes/examples/kubenode.yaml +++ /dev/null @@ -1,7 +0,0 @@ -type: senlin.kubernetes.worker -version: 1.0 -properties: - flavor: k8s.worker - image: ubuntu-16.04 - key_name: elynn - master_cluster: cm diff --git a/contrib/kubernetes/kube/__init__.py b/contrib/kubernetes/kube/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/contrib/kubernetes/kube/base.py b/contrib/kubernetes/kube/base.py deleted file mode 100644 index 5f2bde37c..000000000 --- a/contrib/kubernetes/kube/base.py +++ /dev/null @@ -1,275 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -import os -import random -import string - -from oslo_log import log as logging - -from senlin.common import context -from senlin.common import exception as exc -from senlin.objects import cluster as cluster_obj -from senlin.profiles.os.nova import server - -LOG = logging.getLogger(__name__) - - -def GenKubeToken(): - token_id = ''.join([random.choice( - string.digits + string.ascii_lowercase) for i in range(6)]) - token_secret = ''.join([random.choice( - string.digits + string.ascii_lowercase) for i in range(16)]) - token = '.'.join([token_id, token_secret]) - return token - - -def loadScript(path): - script_file = os.path.join(os.path.dirname(__file__), path) - with open(script_file, "r") as f: - content = f.read() - return content - - -class KubeBaseProfile(server.ServerProfile): - """Kubernetes Base Profile.""" - - def __init__(self, type_name, name, **kwargs): - super(KubeBaseProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - - def _generate_kubeadm_token(self, obj): - token = GenKubeToken() - # store generated token - - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - data = obj.data - data[self.KUBEADM_TOKEN] = token - cluster_obj.Cluster.update(ctx, obj.id, {'data': data}) - return token - - def _get_kubeadm_token(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data.get(self.KUBEADM_TOKEN) - return None - - def _update_master_ip(self, obj, ip): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - cluster.data['kube_master_ip'] = ip - cluster.update(ctx, obj.cluster_id, {'data': cluster.data}) - - def _create_network(self, obj): - client = self.network(obj) - try: - net = client.network_create() - subnet = client.subnet_create(network_id=net.id, - cidr='10.7.0.0/24', - ip_version=4) - except exc.InternalError as ex: - raise exc.EResourceCreation(type='kubernetes', - message=str(ex), - resource_id=obj.id) - pub_net = client.network_get(self.properties[self.PUBLIC_NETWORK]) - try: - router = client.router_create( - external_gateway_info={"network_id": pub_net.id}) - client.add_interface_to_router(router, subnet_id=subnet.id) - fip = client.floatingip_create(floating_network_id=pub_net.id) - except exc.InternalError as ex: - raise exc.EResourceCreation(type='kubernetes', - message=str(ex), - resource_id=obj.id) - - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - data = obj.data - data[self.PRIVATE_NETWORK] = net.id - data[self.PRIVATE_SUBNET] = subnet.id - data[self.PRIVATE_ROUTER] = router.id - data[self.KUBE_MASTER_FLOATINGIP] = fip.floating_ip_address - data[self.KUBE_MASTER_FLOATINGIP_ID] = fip.id - - cluster_obj.Cluster.update(ctx, obj.id, {'data': data}) - - return net.id - - def _delete_network(self, obj): - client = self.network(obj) - fip_id = obj.data.get(self.KUBE_MASTER_FLOATINGIP_ID) - if fip_id: - try: - # delete floating ip - client.floatingip_delete(fip_id) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', id=fip_id, - message=str(ex)) - - router = obj.data.get(self.PRIVATE_ROUTER) - subnet = obj.data.get(self.PRIVATE_SUBNET) - if router and subnet: - try: - client.remove_interface_from_router(router, subnet_id=subnet) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', - 
id=subnet, - message=str(ex)) - - if router: - try: - # delete router - client.router_delete(router, ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', - id=router, - message=str(ex)) - - net = obj.data.get(self.PRIVATE_NETWORK) - if net: - try: - # delete network - client.network_delete(net, ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', - id=net, - message=str(ex)) - - def _associate_floatingip(self, obj, server): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - fip = cluster.data.get(self.KUBE_MASTER_FLOATINGIP) - if fip: - try: - self.compute(obj).server_floatingip_associate(server, - fip) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='floatingip', - type='kubernetes', - id=fip, - message=str(ex)) - - def _disassociate_floatingip(self, obj, server): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - fip = cluster.data.get(self.KUBE_MASTER_FLOATINGIP) - if fip: - try: - self.compute(obj).server_floatingip_disassociate(server, - fip) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='floatingip', - type='kubernetes', - id=fip, - message=str(ex)) - - def _get_cluster_data(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data - return {} - - def _get_network(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data.get(self.PRIVATE_NETWORK) - return None - - def _create_security_group(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - sgid = obj.data.get(self.SECURITY_GROUP, None) - if sgid: - return sgid - - client = self.network(obj) - try: - sg = client.security_group_create(name=self.name) - except Exception as ex: - raise exc.EResourceCreation(type='kubernetes', - message=str(ex)) - data = obj.data - data[self.SECURITY_GROUP] = sg.id - cluster_obj.Cluster.update(ctx, obj.id, {'data': data}) - self._set_security_group_rules(obj, sg.id) - - return sg.id - - def _get_security_group(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data.get(self.SECURITY_GROUP) - return None - - def _set_security_group_rules(self, obj, sgid): - client = self.network(obj) - open_ports = { - 'tcp': [22, 80, 8000, 8080, 6443, 8001, 8443, 443, - 179, 8082, 8086], - 'udp': [8285, 8472], - 'icmp': [None] - } - for p in open_ports.keys(): - for port in open_ports[p]: - try: - client.security_group_rule_create(sgid, port, protocol=p) - except Exception as ex: - raise exc.EResourceCreation(type='kubernetes', - message=str(ex)) - - def _delete_security_group(self, obj): - sgid = obj.data.get(self.SECURITY_GROUP) - if sgid: - try: - self.network(obj).security_group_delete(sgid, - ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='kubernetes', - id=sgid, - message=str(ex)) - - def do_validate(self, obj): - """Validate if the spec has provided valid info for server 
creation. - - :param obj: The node object. - """ - # validate flavor - flavor = self.properties[self.FLAVOR] - self._validate_flavor(obj, flavor) - - # validate image - image = self.properties[self.IMAGE] - if image is not None: - self._validate_image(obj, image) - - # validate key_name - keypair = self.properties[self.KEY_NAME] - if keypair is not None: - self._validate_keypair(obj, keypair) - - return True diff --git a/contrib/kubernetes/kube/master.py b/contrib/kubernetes/kube/master.py deleted file mode 100644 index 1f2e99086..000000000 --- a/contrib/kubernetes/kube/master.py +++ /dev/null @@ -1,279 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 - -import jinja2 -from oslo_log import log as logging -from oslo_utils import encodeutils - -from kube import base -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema - -LOG = logging.getLogger(__name__) - - -class ServerProfile(base.KubeBaseProfile): - """Profile for an kubernetes master server.""" - - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2017.10'} - ] - } - - KEYS = ( - CONTEXT, FLAVOR, IMAGE, KEY_NAME, - PUBLIC_NETWORK, BLOCK_DEVICE_MAPPING_V2, - ) = ( - 'context', 'flavor', 'image', 'key_name', - 'public_network', 'block_device_mapping_v2', - ) - - INTERNAL_KEYS = ( - KUBEADM_TOKEN, KUBE_MASTER_IP, SECURITY_GROUP, - PRIVATE_NETWORK, PRIVATE_SUBNET, PRIVATE_ROUTER, - KUBE_MASTER_FLOATINGIP, KUBE_MASTER_FLOATINGIP_ID, - SCALE_OUT_RECV_ID, SCALE_OUT_URL, - ) = ( - 'kubeadm_token', 'kube_master_ip', 'security_group', - 'private_network', 'private_subnet', 'private_router', - 'kube_master_floatingip', 'kube_master_floatingip_id', - 'scale_out_recv_id', 'scale_out_url', - ) - - NETWORK_KEYS = ( - PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, - FLOATING_NETWORK, FLOATING_IP, - ) = ( - 'port', 'fixed_ip', 'network', 'security_groups', - 'floating_network', 'floating_ip', - ) - - BDM2_KEYS = ( - BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, - BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, - BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, - BDM2_DELETE_ON_TERMINATION, - ) = ( - 'uuid', 'source_type', 'destination_type', 'disk_bus', - 'device_name', 'volume_size', 'guest_format', 'boot_index', - 'device_type', 'delete_on_termination', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('Customized security context for operating servers.'), - ), - FLAVOR: schema.String( - _('ID of flavor used for the server.'), - required=True, - updatable=True, - ), - IMAGE: schema.String( - # IMAGE is not required, because there could be BDM or BDMv2 - # support and the corresponding settings effective - _('ID of image to be used for the new server.'), - updatable=True, - ), - KEY_NAME: schema.String( - _('Name of Nova keypair to be injected to server.'), - ), - PUBLIC_NETWORK: schema.String( - _('Public network for kubernetes.'), - required=True, - ), - BLOCK_DEVICE_MAPPING_V2: schema.List( - _('A list 
specifying the properties of block devices to be used ' - 'for this server.'), - schema=schema.Map( - _('A map specifying the properties of a block device to be ' - 'used by the server.'), - schema={ - BDM2_UUID: schema.String( - _('ID of the source image, snapshot or volume'), - ), - BDM2_SOURCE_TYPE: schema.String( - _("Volume source type, must be one of 'image', " - "'snapshot', 'volume' or 'blank'"), - required=True, - ), - BDM2_DESTINATION_TYPE: schema.String( - _("Volume destination type, must be 'volume' or " - "'local'"), - required=True, - ), - BDM2_DISK_BUS: schema.String( - _('Bus of the device.'), - ), - BDM2_DEVICE_NAME: schema.String( - _('Name of the device(e.g. vda, xda, ....).'), - ), - BDM2_VOLUME_SIZE: schema.Integer( - _('Size of the block device in MB(for swap) and ' - 'in GB(for other formats)'), - required=True, - ), - BDM2_GUEST_FORMAT: schema.String( - _('Specifies the disk file system format(e.g. swap, ' - 'ephemeral, ...).'), - ), - BDM2_BOOT_INDEX: schema.Integer( - _('Define the boot order of the device'), - ), - BDM2_DEVICE_TYPE: schema.String( - _('Type of the device(e.g. disk, cdrom, ...).'), - ), - BDM2_DELETE_ON_TERMINATION: schema.Boolean( - _('Whether to delete the volume when the server ' - 'stops.'), - ), - } - ), - ), - } - - def __init__(self, type_name, name, **kwargs): - super(ServerProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - - def do_cluster_create(self, obj): - self._generate_kubeadm_token(obj) - self._create_security_group(obj) - self._create_network(obj) - - def do_cluster_delete(self, obj): - if obj.dependents and 'kube-node' in obj.dependents: - msg = ("Cluster %s delete failed, " - "Node clusters %s must be deleted first." % - (obj.id, obj.dependents['kube-node'])) - raise exc.EResourceDeletion(type='kubernetes.master', - id=obj.id, - message=msg) - self._delete_network(obj) - self._delete_security_group(obj) - - def do_create(self, obj): - """Create a server for the node object. - - :param obj: The node object for which a server will be created. 
- """ - kwargs = {} - for key in self.KEYS: - if self.properties[key] is not None: - kwargs[key] = self.properties[key] - - image_ident = self.properties[self.IMAGE] - if image_ident is not None: - image = self._validate_image(obj, image_ident, 'create') - kwargs.pop(self.IMAGE) - kwargs['imageRef'] = image.id - - flavor_ident = self.properties[self.FLAVOR] - flavor = self._validate_flavor(obj, flavor_ident, 'create') - kwargs.pop(self.FLAVOR) - kwargs['flavorRef'] = flavor.id - - keypair_name = self.properties[self.KEY_NAME] - if keypair_name: - keypair = self._validate_keypair(obj, keypair_name, 'create') - kwargs['key_name'] = keypair.name - - kwargs['name'] = obj.name - - metadata = self._build_metadata(obj, {}) - kwargs['metadata'] = metadata - - jj_vars = {} - cluster_data = self._get_cluster_data(obj) - kwargs['networks'] = [{'uuid': cluster_data[self.PRIVATE_NETWORK]}] - - # Get user_data parameters from metadata - jj_vars['KUBETOKEN'] = cluster_data[self.KUBEADM_TOKEN] - jj_vars['MASTER_FLOATINGIP'] = cluster_data[ - self.KUBE_MASTER_FLOATINGIP] - - block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] - if block_device_mapping_v2 is not None: - kwargs['block_device_mapping_v2'] = self._resolve_bdm( - obj, block_device_mapping_v2, 'create') - - # user_data = self.properties[self.USER_DATA] - user_data = base.loadScript('./scripts/master.sh') - if user_data is not None: - # Use jinja2 to replace variables defined in user_data - try: - jj_t = jinja2.Template(user_data) - user_data = jj_t.render(**jj_vars) - except (jinja2.exceptions.UndefinedError, ValueError) as ex: - # TODO(anyone) Handle jinja2 error - pass - ud = encodeutils.safe_encode(user_data) - kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud)) - - sgid = self._get_security_group(obj) - kwargs['security_groups'] = [{'name': sgid}] - - server = None - resource_id = None - try: - server = self.compute(obj).server_create(**kwargs) - self.compute(obj).wait_for_server(server.id) - server = self.compute(obj).server_get(server.id) - self._update_master_ip(obj, server.addresses[''][0]['addr']) - self._associate_floatingip(obj, server) - LOG.info("Created master node: %s" % server.id) - return server.id - except exc.InternalError as ex: - if server and server.id: - resource_id = server.id - raise exc.EResourceCreation(type='server', - message=str(ex), - resource_id=resource_id) - - def do_delete(self, obj, **params): - """Delete the physical resource associated with the specified node. - - :param obj: The node object to operate on. - :param kwargs params: Optional keyword arguments for the delete - operation. - :returns: This operation always return True unless exception is - caught. - :raises: `EResourceDeletion` if interaction with compute service fails. 
- """ - if not obj.physical_id: - return True - - server_id = obj.physical_id - ignore_missing = params.get('ignore_missing', True) - internal_ports = obj.data.get('internal_ports', []) - force = params.get('force', False) - - try: - self._disassociate_floatingip(obj, server_id) - driver = self.compute(obj) - if force: - driver.server_force_delete(server_id, ignore_missing) - else: - driver.server_delete(server_id, ignore_missing) - driver.wait_for_server_delete(server_id) - if internal_ports: - ex = self._delete_ports(obj, internal_ports) - if ex: - raise ex - return True - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='server', id=server_id, - message=str(ex)) diff --git a/contrib/kubernetes/kube/scripts/master.sh b/contrib/kubernetes/kube/scripts/master.sh deleted file mode 100644 index 84b7bcea1..000000000 --- a/contrib/kubernetes/kube/scripts/master.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -HOSTNAME=`hostname` -echo "127.0.0.1 $HOSTNAME" >> /etc/hosts -apt-get update && apt-get install -y docker.io curl apt-transport-https -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list -apt-get update -apt-get install -y kubelet kubeadm kubectl -PODNETWORKCIDR=10.244.0.0/16 -kubeadm init --token {{ KUBETOKEN }} --skip-preflight-checks --pod-network-cidr=$PODNETWORKCIDR --apiserver-cert-extra-sans={{ MASTER_FLOATINGIP}} --token-ttl 0 -mkdir -p $HOME/.kube -cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -chown $(id -u):$(id -g) $HOME/.kube/config -mkdir -p root/.kube -cp -i /etc/kubernetes/admin.conf root/.kube/config -chown root:root root/.kube/config -cp -i /etc/kubernetes/admin.conf /opt/admin.kubeconf -echo "# Setup network pod" -kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.9.0/Documentation/kube-flannel.yml -echo "# Install kubernetes dashboard" -kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml -echo "# Install heapster" -kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/grafana.yaml -kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml -kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml -kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml -echo "# Download monitor script" -curl -o /opt/monitor.sh https://raw.githubusercontent.com/lynic/templates/master/k8s/monitor.sh -chmod a+x /opt/monitor.sh -echo "*/1 * * * * root bash /opt/monitor.sh 2>&1 >> /var/log/kube-minitor.log" > /etc/cron.d/kube-monitor -systemctl restart cron -echo "# Get status" -kubectl get nodes \ No newline at end of file diff --git a/contrib/kubernetes/kube/scripts/worker.sh b/contrib/kubernetes/kube/scripts/worker.sh deleted file mode 100644 index 2bc14cf2a..000000000 --- a/contrib/kubernetes/kube/scripts/worker.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh -HOSTNAME=`hostname` -echo "127.0.0.1 $HOSTNAME" >> /etc/hosts -apt-get update && apt-get install -y docker.io curl apt-transport-https -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list -apt-get update -apt-get install -y 
kubelet kubeadm kubectl -MASTER_IP={{ MASTERIP }} -kubeadm join --token {{ KUBETOKEN }} --skip-preflight-checks --discovery-token-unsafe-skip-ca-verification $MASTER_IP:6443 diff --git a/contrib/kubernetes/kube/worker.py b/contrib/kubernetes/kube/worker.py deleted file mode 100644 index 26ca641e5..000000000 --- a/contrib/kubernetes/kube/worker.py +++ /dev/null @@ -1,353 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 - -import jinja2 -from oslo_log import log as logging -from oslo_utils import encodeutils - -from kube import base -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.objects import cluster as cluster_obj - -LOG = logging.getLogger(__name__) - - -class ServerProfile(base.KubeBaseProfile): - """Profile for an kubernetes node server.""" - - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2017.10'} - ] - } - - KEYS = ( - CONTEXT, FLAVOR, IMAGE, KEY_NAME, BLOCK_DEVICE_MAPPING_V2 - ) = ( - 'context', 'flavor', 'image', 'key_name', 'block_device_mapping_v2', - ) - - KUBE_KEYS = ( - MASTER_CLUSTER, - ) = ( - 'master_cluster', - ) - - MASTER_CLUSTER_KEYS = ( - KUBEADM_TOKEN, KUBE_MASTER_IP, - PRIVATE_NETWORK, PRIVATE_SUBNET, PRIVATE_ROUTER, - ) = ( - 'kubeadm_token', 'kube_master_ip', - 'private_network', 'private_subnet', 'private_router', - ) - - INTERNAL_KEYS = ( - SECURITY_GROUP, SCALE_OUT_RECV_ID, SCALE_OUT_URL, - ) = ( - 'security_group', 'scale_out_recv_id', 'scale_out_url', - ) - - NETWORK_KEYS = ( - PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, - FLOATING_NETWORK, FLOATING_IP, - ) = ( - 'port', 'fixed_ip', 'network', 'security_groups', - 'floating_network', 'floating_ip', - ) - - BDM2_KEYS = ( - BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, - BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, - BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, - BDM2_DELETE_ON_TERMINATION, - ) = ( - 'uuid', 'source_type', 'destination_type', 'disk_bus', - 'device_name', 'volume_size', 'guest_format', 'boot_index', - 'device_type', 'delete_on_termination', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('Customized security context for operating servers.'), - ), - FLAVOR: schema.String( - _('ID of flavor used for the server.'), - required=True, - updatable=True, - ), - IMAGE: schema.String( - # IMAGE is not required, because there could be BDM or BDMv2 - # support and the corresponding settings effective - _('ID of image to be used for the new server.'), - updatable=True, - ), - KEY_NAME: schema.String( - _('Name of Nova keypair to be injected to server.'), - ), - MASTER_CLUSTER: schema.String( - _('Cluster running kubernetes master.'), - required=True, - ), - BLOCK_DEVICE_MAPPING_V2: schema.List( - _('A list specifying the properties of block devices to be used ' - 'for this server.'), - schema=schema.Map( - _('A map specifying the properties of a block device to be ' - 'used by the server.'), - 
schema={ - BDM2_UUID: schema.String( - _('ID of the source image, snapshot or volume'), - ), - BDM2_SOURCE_TYPE: schema.String( - _("Volume source type, must be one of 'image', " - "'snapshot', 'volume' or 'blank'"), - required=True, - ), - BDM2_DESTINATION_TYPE: schema.String( - _("Volume destination type, must be 'volume' or " - "'local'"), - required=True, - ), - BDM2_DISK_BUS: schema.String( - _('Bus of the device.'), - ), - BDM2_DEVICE_NAME: schema.String( - _('Name of the device(e.g. vda, xda, ....).'), - ), - BDM2_VOLUME_SIZE: schema.Integer( - _('Size of the block device in MB(for swap) and ' - 'in GB(for other formats)'), - required=True, - ), - BDM2_GUEST_FORMAT: schema.String( - _('Specifies the disk file system format(e.g. swap, ' - 'ephemeral, ...).'), - ), - BDM2_BOOT_INDEX: schema.Integer( - _('Define the boot order of the device'), - ), - BDM2_DEVICE_TYPE: schema.String( - _('Type of the device(e.g. disk, cdrom, ...).'), - ), - BDM2_DELETE_ON_TERMINATION: schema.Boolean( - _('Whether to delete the volume when the server ' - 'stops.'), - ), - } - ), - ), - } - - def __init__(self, type_name, name, **kwargs): - super(ServerProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - - def _get_master_cluster_info(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - master = self.properties[self.MASTER_CLUSTER] - try: - cluster = cluster_obj.Cluster.find(ctx, master) - except Exception as ex: - raise exc.EResourceCreation(type='kubernetes.worker', - message=str(ex)) - for key in self.MASTER_CLUSTER_KEYS: - if key not in cluster.data: - raise exc.EResourceCreation( - type='kubernetes.worker', - message="Can't find %s in cluster %s" % (key, master)) - - return cluster.data - - def _set_cluster_dependents(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - master = self.properties[self.MASTER_CLUSTER] - try: - master_cluster = cluster_obj.Cluster.find(ctx, master) - except exc.ResourceNotFound: - msg = _("Cannot find the given cluster: %s") % master - raise exc.BadRequest(msg=msg) - if master_cluster: - # configure kube master dependents, kube master record kube node - # cluster uuid - master_dependents = master_cluster.dependents - master_dependents['kube-node'] = obj.id - cluster_obj.Cluster.update(ctx, master_cluster.id, - {'dependents': master_dependents}) - - def _del_cluster_dependents(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - master = self.properties[self.MASTER_CLUSTER] - try: - master_cluster = cluster_obj.Cluster.find(ctx, master) - except exc.ResourceNotFound: - msg = _("Cannot find the given cluster: %s") % master - raise exc.BadRequest(msg=msg) - - if master_cluster: - # remove kube master record kube node dependents - master_dependents = master_cluster.dependents - if master_dependents and 'kube-node' in master_dependents: - master_dependents.pop('kube-node') - cluster_obj.Cluster.update(ctx, master_cluster.id, - {'dependents': master_dependents}) - - def _get_cluster_data(self, obj): - ctx = context.get_service_context(user_id=obj.user, - project_id=obj.project) - if obj.cluster_id: - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - return cluster.data - - return {} - - def do_cluster_create(self, obj): - self._create_security_group(obj) - self._set_cluster_dependents(obj) - - def do_cluster_delete(self, obj): - self._delete_security_group(obj) - self._del_cluster_dependents(obj) - - def do_validate(self, 
obj): - """Validate if the spec has provided valid info for server creation. - - :param obj: The node object. - """ - # validate flavor - flavor = self.properties[self.FLAVOR] - self._validate_flavor(obj, flavor) - - # validate image - image = self.properties[self.IMAGE] - if image is not None: - self._validate_image(obj, image) - - # validate key_name - keypair = self.properties[self.KEY_NAME] - if keypair is not None: - self._validate_keypair(obj, keypair) - - return True - - def do_create(self, obj): - """Create a server for the node object. - - :param obj: The node object for which a server will be created. - """ - kwargs = {} - for key in self.KEYS: - if self.properties[key] is not None: - kwargs[key] = self.properties[key] - - image_ident = self.properties[self.IMAGE] - if image_ident is not None: - image = self._validate_image(obj, image_ident, 'create') - kwargs.pop(self.IMAGE) - kwargs['imageRef'] = image.id - - flavor_ident = self.properties[self.FLAVOR] - flavor = self._validate_flavor(obj, flavor_ident, 'create') - kwargs.pop(self.FLAVOR) - kwargs['flavorRef'] = flavor.id - - keypair_name = self.properties[self.KEY_NAME] - if keypair_name: - keypair = self._validate_keypair(obj, keypair_name, 'create') - kwargs['key_name'] = keypair.name - - kwargs['name'] = obj.name - - metadata = self._build_metadata(obj, {}) - kwargs['metadata'] = metadata - - sgid = self._get_security_group(obj) - kwargs['security_groups'] = [{'name': sgid}] - - jj_vars = {} - master_cluster = self._get_master_cluster_info(obj) - kwargs['networks'] = [{'uuid': master_cluster[self.PRIVATE_NETWORK]}] - jj_vars['KUBETOKEN'] = master_cluster[self.KUBEADM_TOKEN] - jj_vars['MASTERIP'] = master_cluster[self.KUBE_MASTER_IP] - - block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] - if block_device_mapping_v2 is not None: - kwargs['block_device_mapping_v2'] = self._resolve_bdm( - obj, block_device_mapping_v2, 'create') - - user_data = base.loadScript('./scripts/worker.sh') - if user_data is not None: - # Use jinja2 to replace variables defined in user_data - try: - jj_t = jinja2.Template(user_data) - user_data = jj_t.render(**jj_vars) - except (jinja2.exceptions.UndefinedError, ValueError) as ex: - # TODO(anyone) Handle jinja2 error - pass - ud = encodeutils.safe_encode(user_data) - kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud)) - - server = None - resource_id = None - try: - server = self.compute(obj).server_create(**kwargs) - self.compute(obj).wait_for_server(server.id) - server = self.compute(obj).server_get(server.id) - return server.id - except exc.InternalError as ex: - if server and server.id: - resource_id = server.id - raise exc.EResourceCreation(type='server', - message=str(ex), - resource_id=resource_id) - - def do_delete(self, obj, **params): - """Delete the physical resource associated with the specified node. - - :param obj: The node object to operate on. - :param kwargs params: Optional keyword arguments for the delete - operation. - :returns: This operation always return True unless exception is - caught. - :raises: `EResourceDeletion` if interaction with compute service fails. 
- """ - if not obj.physical_id: - return True - - server_id = obj.physical_id - ignore_missing = params.get('ignore_missing', True) - internal_ports = obj.data.get('internal_ports', []) - force = params.get('force', False) - - try: - driver = self.compute(obj) - if force: - driver.server_force_delete(server_id, ignore_missing) - else: - driver.server_delete(server_id, ignore_missing) - driver.wait_for_server_delete(server_id) - if internal_ports: - ex = self._delete_ports(obj, internal_ports) - if ex: - raise ex - return True - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='server', id=server_id, - message=str(ex)) diff --git a/contrib/kubernetes/requirements.txt b/contrib/kubernetes/requirements.txt deleted file mode 100644 index 98592e47d..000000000 --- a/contrib/kubernetes/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Jinja2>=2.8,!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4 # BSD License (3 clause) \ No newline at end of file diff --git a/contrib/kubernetes/setup.cfg b/contrib/kubernetes/setup.cfg deleted file mode 100644 index d525be9c4..000000000 --- a/contrib/kubernetes/setup.cfg +++ /dev/null @@ -1,28 +0,0 @@ -[metadata] -name = senlin-kubernetes -summary = Kubernetes profile for senlin -description-file = - README.rst -author = OpenStack -author-email = openstack-discuss@lists.openstack.org -home-page = https://docs.openstack.org/senlin/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -[entry_points] -senlin.profiles = - senlin.kubernetes.master-1.0 = kube.master:ServerProfile - senlin.kubernetes.worker-1.0 = kube.worker:ServerProfile - -[global] -setup-hooks = - pbr.hooks.setup_hook diff --git a/contrib/kubernetes/setup.py b/contrib/kubernetes/setup.py deleted file mode 100644 index 98b93ebc5..000000000 --- a/contrib/kubernetes/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/contrib/vdu/README.rst b/contrib/vdu/README.rst deleted file mode 100644 index f7ae358b6..000000000 --- a/contrib/vdu/README.rst +++ /dev/null @@ -1,13 +0,0 @@ -# VDU Profile for NFV - -## Install -```bash -pip install --editable . -``` - -## Usage -```bash -. 
openrc demo demo -senlin profile-create vdu-profile -s examples/vdu.yaml -senlin cluster-create vdu-cluster -p vdu-profile -M config='{"word": "world"}' -c 1 -``` diff --git a/contrib/vdu/examples/vdu.yaml b/contrib/vdu/examples/vdu.yaml deleted file mode 100644 index 8daee4db2..000000000 --- a/contrib/vdu/examples/vdu.yaml +++ /dev/null @@ -1,17 +0,0 @@ -type: os.senlin.vdu -version: 1.0 -properties: - flavor: m1.tiny - image: "cirros-0.3.4-x86_64-uec" - networks: - - network: private - security_groups: - - default - floating_network: public - metadata: - test_key: test_value - user_data: | - #!/bin/sh - echo 'hello, {{ word }}' - echo '{{ ports.0.fixed_ips.0.ip_address }}' - echo '{{ ports.0.floating_ip_address }}' diff --git a/contrib/vdu/requirements.txt b/contrib/vdu/requirements.txt deleted file mode 100644 index 98592e47d..000000000 --- a/contrib/vdu/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Jinja2>=2.8,!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4 # BSD License (3 clause) \ No newline at end of file diff --git a/contrib/vdu/setup.cfg b/contrib/vdu/setup.cfg deleted file mode 100644 index 58e92cf15..000000000 --- a/contrib/vdu/setup.cfg +++ /dev/null @@ -1,31 +0,0 @@ -[metadata] -name = senlin-vdu -summary = VDU profile for senlin -description-file = - README.rst -author = OpenStack -author-email = openstack-discuss@lists.openstack.org -home-page = https://docs.openstack.org/senlin/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -# [files] -# packages = -# senlin-vdu - -[entry_points] -senlin.profiles = - os.senlin.vdu-1.0 = vdu.server:ServerProfile - -[global] -setup-hooks = - pbr.hooks.setup_hook diff --git a/contrib/vdu/setup.py b/contrib/vdu/setup.py deleted file mode 100644 index 736375744..000000000 --- a/contrib/vdu/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. 
-# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/contrib/vdu/vdu/__init__.py b/contrib/vdu/vdu/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/contrib/vdu/vdu/server.py b/contrib/vdu/vdu/server.py deleted file mode 100644 index 43500b416..000000000 --- a/contrib/vdu/vdu/server.py +++ /dev/null @@ -1,1469 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 -import copy - -import jinja2 -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils - -from senlin.common import constraints -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.objects import cluster as cluster_obj -from senlin.objects import node as node_obj -from senlin.profiles import base - -LOG = logging.getLogger(__name__) - - -class ServerProfile(base.Profile): - """Profile for an OpenStack Nova server.""" - - VERSIONS = { - '1.0': [ - {'status': consts.SUPPORTED, 'since': '2016.04'} - ] - } - - KEYS = ( - CONTEXT, ADMIN_PASS, AUTO_DISK_CONFIG, AVAILABILITY_ZONE, - BLOCK_DEVICE_MAPPING_V2, - CONFIG_DRIVE, FLAVOR, IMAGE, KEY_NAME, METADATA, - NAME, NETWORKS, PERSONALITY, SECURITY_GROUPS, - USER_DATA, SCHEDULER_HINTS, - ) = ( - 'context', 'admin_pass', 'auto_disk_config', 'availability_zone', - 'block_device_mapping_v2', - 'config_drive', 'flavor', 'image', 'key_name', 'metadata', - 'name', 'networks', 'personality', 'security_groups', - 'user_data', 'scheduler_hints', - ) - - BDM2_KEYS = ( - BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, - BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, - BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, - BDM2_DELETE_ON_TERMINATION, - ) = ( - 'uuid', 'source_type', 'destination_type', 'disk_bus', - 'device_name', 'volume_size', 'guest_format', 'boot_index', - 'device_type', 'delete_on_termination', - ) - - NETWORK_KEYS = ( - PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, - FLOATING_NETWORK, FLOATING_IP, - ) = ( - 'port', 'fixed_ip', 'network', 'security_groups', - 'floating_network', 'floating_ip', - ) - - PERSONALITY_KEYS = ( - PATH, CONTENTS, - ) = ( - 'path', 'contents', - ) - - SCHEDULER_HINTS_KEYS = ( - GROUP, - ) = ( - 'group', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('Customized security context for operating servers.'), - ), - ADMIN_PASS: schema.String( - _('Password for the administrator account.'), - ), - AUTO_DISK_CONFIG: schema.Boolean( - _('Whether the disk partition is done automatically.'), - default=True, - ), - AVAILABILITY_ZONE: schema.String( - _('Name of availability zone for running the server.'), - ), - BLOCK_DEVICE_MAPPING_V2: schema.List( - _('A list specifying the properties of block devices to be used ' - 'for this server.'), - schema=schema.Map( - _('A 
map specifying the properties of a block device to be ' - 'used by the server.'), - schema={ - BDM2_UUID: schema.String( - _('ID of the source image, snapshot or volume'), - ), - BDM2_SOURCE_TYPE: schema.String( - _('Volume source type, should be image, snapshot, ' - 'volume or blank'), - required=True, - ), - BDM2_DESTINATION_TYPE: schema.String( - _('Volume destination type, should be volume or ' - 'local'), - required=True, - ), - BDM2_DISK_BUS: schema.String( - _('Bus of the device.'), - ), - BDM2_DEVICE_NAME: schema.String( - _('Name of the device(e.g. vda, xda, ....).'), - ), - BDM2_VOLUME_SIZE: schema.Integer( - _('Size of the block device in MB(for swap) and ' - 'in GB(for other formats)'), - required=True, - ), - BDM2_GUEST_FORMAT: schema.String( - _('Specifies the disk file system format(e.g. swap, ' - 'ephemeral, ...).'), - ), - BDM2_BOOT_INDEX: schema.Integer( - _('Define the boot order of the device'), - ), - BDM2_DEVICE_TYPE: schema.String( - _('Type of the device(e.g. disk, cdrom, ...).'), - ), - BDM2_DELETE_ON_TERMINATION: schema.Boolean( - _('Whether to delete the volume when the server ' - 'stops.'), - ), - } - ), - ), - CONFIG_DRIVE: schema.Boolean( - _('Whether config drive should be enabled for the server.'), - ), - FLAVOR: schema.String( - _('ID of flavor used for the server.'), - required=True, - updatable=True, - ), - IMAGE: schema.String( - # IMAGE is not required, because there could be BDM or BDMv2 - # support and the corresponding settings effective - _('ID of image to be used for the new server.'), - updatable=True, - ), - KEY_NAME: schema.String( - _('Name of Nova keypair to be injected to server.'), - ), - METADATA: schema.Map( - _('A collection of key/value pairs to be associated with the ' - 'server created. Both key and value should be <=255 chars.'), - updatable=True, - ), - NAME: schema.String( - _('Name of the server. When omitted, the node name will be used.'), - updatable=True, - ), - NETWORKS: schema.List( - _('List of networks for the server.'), - schema=schema.Map( - _('A map specifying the properties of a network for uses.'), - schema={ - NETWORK: schema.String( - _('Name or ID of network to create a port on.'), - ), - PORT: schema.String( - _('Port ID to be used by the network.'), - ), - FIXED_IP: schema.String( - _('Fixed IP to be used by the network.'), - ), - PORT_SECURITY_GROUPS: schema.List( - _('A list of security groups to be attached to ' - 'this port.'), - schema=schema.String( - _('Name of a security group'), - required=True, - ), - ), - FLOATING_NETWORK: schema.String( - _('The network on which to create a floating IP'), - ), - FLOATING_IP: schema.String( - _('The floating IP address to be associated with ' - 'this port.'), - ), - }, - ), - updatable=True, - ), - PERSONALITY: schema.List( - _('List of files to be injected into the server, where each.'), - schema=schema.Map( - _('A map specifying the path & contents for an injected ' - 'file.'), - schema={ - PATH: schema.String( - _('In-instance path for the file to be injected.'), - required=True, - ), - CONTENTS: schema.String( - _('Contents of the file to be injected.'), - required=True, - ), - }, - ), - ), - SCHEDULER_HINTS: schema.Map( - _('A collection of key/value pairs to be associated with the ' - 'Scheduler hints. 
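As the comment on ``IMAGE`` above notes, a server can boot without a top-level image when a block device mapping supplies the source. A hypothetical spec fragment exercising the ``block_device_mapping_v2`` schema just defined (IDs are placeholders, not real resources):

```
# Boot-from-volume properties for this profile: the BDMv2 entry
# supplies the image source, so the 'image' key can be omitted.
spec_properties = {
    'flavor': 'm1.tiny',
    'block_device_mapping_v2': [{
        'uuid': 'IMAGE_ID',             # placeholder source image ID
        'source_type': 'image',         # image, snapshot, volume or blank
        'destination_type': 'volume',   # volume or local
        'volume_size': 1,               # GB for non-swap devices
        'boot_index': 0,                # boot from this device
        'delete_on_termination': True,  # drop the volume with the server
    }],
    'networks': [{'network': 'private'}],
}
```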
Both key and value should be <=255 chars.'), - ), - - SECURITY_GROUPS: schema.List( - _('List of security groups.'), - schema=schema.String( - _('Name of a security group'), - required=True, - ), - ), - USER_DATA: schema.String( - _('User data to be exposed by the metadata server.'), - ), - } - - OP_NAMES = ( - OP_REBOOT, OP_REBUILD, OP_CHANGE_PASSWORD, OP_PAUSE, OP_UNPAUSE, - OP_SUSPEND, OP_RESUME, OP_LOCK, OP_UNLOCK, OP_START, OP_STOP, - OP_RESCUE, OP_UNRESCUE, OP_EVACUATE, - ) = ( - 'reboot', 'rebuild', 'change_password', 'pause', 'unpause', - 'suspend', 'resume', 'lock', 'unlock', 'start', 'stop', - 'rescue', 'unrescue', 'evacuate', - ) - - REBOOT_TYPE = 'type' - REBOOT_TYPES = (REBOOT_SOFT, REBOOT_HARD) = ('SOFT', 'HARD') - ADMIN_PASSWORD = 'admin_pass' - RESCUE_IMAGE = 'image_ref' - EVACUATE_OPTIONS = ( - EVACUATE_HOST, EVACUATE_FORCE - ) = ( - 'host', 'force' - ) - - OPERATIONS = { - OP_REBOOT: schema.Operation( - _("Reboot the nova server."), - schema={ - REBOOT_TYPE: schema.StringParam( - _("Type of reboot which can be 'SOFT' or 'HARD'."), - default=REBOOT_SOFT, - constraints=[ - constraints.AllowedValues(REBOOT_TYPES), - ] - ) - } - ), - OP_REBUILD: schema.Operation( - _("Rebuild the server using current image and admin password."), - ), - OP_CHANGE_PASSWORD: schema.Operation( - _("Change the administrator password."), - schema={ - ADMIN_PASSWORD: schema.StringParam( - _("New password for the administrator.") - ) - } - ), - OP_PAUSE: schema.Operation( - _("Pause the server from running."), - ), - OP_UNPAUSE: schema.Operation( - _("Unpause the server to running state."), - ), - OP_SUSPEND: schema.Operation( - _("Suspend the running of the server."), - ), - OP_RESUME: schema.Operation( - _("Resume the running of the server."), - ), - OP_LOCK: schema.Operation( - _("Lock the server."), - ), - OP_UNLOCK: schema.Operation( - _("Unlock the server."), - ), - OP_START: schema.Operation( - _("Start the server."), - ), - OP_STOP: schema.Operation( - _("Stop the server."), - ), - OP_RESCUE: schema.Operation( - _("Rescue the server."), - schema={ - RESCUE_IMAGE: schema.StringParam( - _("A string referencing the image to use."), - ), - } - ), - OP_UNRESCUE: schema.Operation( - _("Unrescue the server."), - ), - OP_EVACUATE: schema.Operation( - _("Evacuate the server to a different host."), - schema={ - EVACUATE_HOST: schema.StringParam( - _("The target host to evacuate the server."), - ), - EVACUATE_FORCE: schema.StringParam( - _("Whether the evacuation should be a forced one.") - ) - } - ) - } - - def __init__(self, type_name, name, **kwargs): - super(ServerProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - - def _validate_az(self, obj, az_name, reason=None): - try: - res = self.compute(obj).validate_azs([az_name]) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - else: - raise - - if not res: - msg = _("The specified %(key)s '%(value)s' could not be found" - ) % {'key': self.AVAILABILITY_ZONE, 'value': az_name} - if reason == 'create': - raise exc.EResourceCreation(type='server', message=msg) - else: - raise exc.InvalidSpec(message=msg) - - return az_name - - def _validate_flavor(self, obj, name_or_id, reason=None): - flavor = None - msg = '' - try: - flavor = self.compute(obj).flavor_find(name_or_id, False) - except exc.InternalError as ex: - msg = str(ex) - if reason is None: # reason is 'validate' - if ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." 
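The ``OPERATIONS`` map above is what backs the ``handle_*`` methods defined further down: each operation advertises its parameters, defaults and constraints. Reduced to plain Python (a sketch, not senlin's actual schema engine), the reboot parameter check amounts to:

```
REBOOT_TYPES = ('SOFT', 'HARD')

def validate_reboot_params(params):
    # The default mirrors REBOOT_SOFT; anything outside the
    # AllowedValues constraint is rejected before the driver runs.
    rtype = params.get('type', 'SOFT')
    if rtype not in REBOOT_TYPES:
        raise ValueError("'type' must be one of %r" % (REBOOT_TYPES,))
    return {'type': rtype}

validate_reboot_params({'type': 'HARD'})   # -> {'type': 'HARD'}
validate_reboot_params({})                 # -> {'type': 'SOFT'}
```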
- ) % {'k': self.FLAVOR, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - if flavor is not None: - if not flavor.is_disabled: - return flavor - msg = _("The specified %(k)s '%(v)s' is disabled" - ) % {'k': self.FLAVOR, 'v': name_or_id} - - if reason == 'create': - raise exc.EResourceCreation(type='server', message=msg) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=msg) - else: - raise exc.InvalidSpec(message=msg) - - def _validate_image(self, obj, name_or_id, reason=None): - try: - return self.compute(obj).image_find(name_or_id, False) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - elif ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." - ) % {'k': self.IMAGE, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - def _validate_keypair(self, obj, name_or_id, reason=None): - try: - return self.compute(obj).keypair_find(name_or_id, False) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - elif ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." - ) % {'k': self.KEY_NAME, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - def do_validate(self, obj): - """Validate if the spec has provided valid info for server creation. - - :param obj: The node object. - """ - # validate availability_zone - az_name = self.properties[self.AVAILABILITY_ZONE] - if az_name is not None: - self._validate_az(obj, az_name) - - # validate flavor - flavor = self.properties[self.FLAVOR] - self._validate_flavor(obj, flavor) - - # validate image - image = self.properties[self.IMAGE] - if image is not None: - self._validate_image(obj, image) - - # validate key_name - keypair = self.properties[self.KEY_NAME] - if keypair is not None: - self._validate_keypair(obj, keypair) - - # validate networks - networks = self.properties[self.NETWORKS] - for net in networks: - self._validate_network(obj, net) - - return True - - def _resolve_bdm(self, bdm): - for bd in bdm: - for key in self.BDM2_KEYS: - if bd[key] is None: - del bd[key] - return bdm - - def _check_security_groups(self, nc, net_spec, result): - """Check security groups. - - :param nc: network driver connection. - :param net_spec: the specification to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - sgs = net_spec.get(self.PORT_SECURITY_GROUPS) - if not sgs: - return - - res = [] - try: - for sg in sgs: - sg_obj = nc.security_group_find(sg) - res.append(sg_obj.id) - except exc.InternalError as ex: - return str(ex) - - result[self.PORT_SECURITY_GROUPS] = res - return - - def _check_network(self, nc, net, result): - """Check the specified network. - - :param nc: network driver connection. - :param net: the name or ID of network to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. 
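``_validate_flavor``, ``_validate_image`` and ``_validate_keypair`` above all repeat one pattern: the same lookup failure surfaces as a different exception type depending on why validation ran. Reduced to its core, with stand-in exception classes (the real ones live in ``senlin.common.exception``):

```
class EResourceCreation(Exception):   # stand-in for senlin's class
    pass

class EResourceUpdate(Exception):     # stand-in
    pass

class InvalidSpec(Exception):         # stand-in
    pass

def raise_for(reason, msg):
    """Map one validation failure onto the caller's context."""
    if reason == 'create':
        raise EResourceCreation(msg)   # node creation fails
    if reason == 'update':
        raise EResourceUpdate(msg)     # node update fails
    raise InvalidSpec(msg)             # plain spec validation fails
```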
- """ - if net is None: - return - try: - net_obj = nc.network_get(net) - result[self.NETWORK] = net_obj.id - except exc.InternalError as ex: - return str(ex) - - def _check_port(self, nc, port, result): - """Check the specified port. - - :param nc: network driver connection. - :param port: the name or ID of port to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - if port is None: - return - - try: - port_obj = nc.port_find(port) - if port_obj.status != 'DOWN': - return _("The status of the port %(p)s must be DOWN" - ) % {'p': port} - result[self.PORT] = port_obj.id - return - except exc.InternalError as ex: - return str(ex) - - def _check_floating_ip(self, nc, net_spec, result): - """Check floating IP and network, if specified. - - :param nc: network driver connection. - :param net_spec: the specification to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - net = net_spec.get(self.FLOATING_NETWORK) - if net: - try: - net_obj = nc.network_get(net) - result[self.FLOATING_NETWORK] = net_obj.id - except exc.InternalError as ex: - return str(ex) - - flt_ip = net_spec.get(self.FLOATING_IP) - if not flt_ip: - return - - try: - # Find floating ip with this address - fip = nc.floatingip_find(flt_ip) - if fip: - if fip.status == 'ACTIVE': - return _('the floating IP %s has been used.') % flt_ip - result['floating_ip_id'] = fip.id - return - - # Create a floating IP with address if floating ip unspecified - if not net: - return _('Must specify a network to create a floating IP') - - result[self.FLOATING_IP] = flt_ip - return - except exc.InternalError as ex: - return str(ex) - - def _validate_network(self, obj, net_spec, reason=None): - - def _verify(error): - if error is None: - return - - if reason == 'create': - raise exc.EResourceCreation(type='server', message=error) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=error) - else: - raise exc.InvalidSpec(message=error) - - nc = self.network(obj) - result = {} - - # check network - net = net_spec.get(self.NETWORK) - error = self._check_network(nc, net, result) - _verify(error) - - # check port - port = net_spec.get(self.PORT) - error = self._check_port(nc, port, result) - _verify(error) - - if port is None and net is None: - _verify(_("One of '%(p)s' and '%(n)s' must be provided" - ) % {'p': self.PORT, 'n': self.NETWORK}) - - fixed_ip = net_spec.get(self.FIXED_IP) - if fixed_ip: - if port is not None: - _verify(_("The '%(p)s' property and the '%(fip)s' property " - "cannot be specified at the same time" - ) % {'p': self.PORT, 'fip': self.FIXED_IP}) - result[self.FIXED_IP] = fixed_ip - - # Check security_groups - error = self._check_security_groups(nc, net_spec, result) - _verify(error) - - # Check floating IP - error = self._check_floating_ip(nc, net_spec, result) - _verify(error) - - return result - - def _get_port(self, obj, net_spec): - """Fetch or create a port. - - :param obj: The node object. - :param net_spec: The parameters to create a port. - :returns: Created port object and error message. 
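The branching in ``_check_floating_ip`` above encodes a small decision table. A sketch of it in isolation, using dict-shaped stand-ins for the Neutron objects and raising where the original returns an error message:

```
def plan_floating_ip(existing_fip, floating_network, floating_ip):
    # An ACTIVE floating IP is already bound somewhere: refuse it.
    if existing_fip is not None:
        if existing_fip['status'] == 'ACTIVE':
            raise ValueError('the floating IP %s has been used.'
                             % floating_ip)
        return {'floating_ip_id': existing_fip['id']}   # reuse it
    # No existing FIP: one can only be created on a known network.
    if not floating_network:
        raise ValueError('Must specify a network to create a floating IP')
    return {'floating_ip': floating_ip}                 # create later
```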
- """ - port_id = net_spec.get(self.PORT, None) - if port_id: - try: - port = self.network(obj).port_find(port_id) - return port, None - except exc.InternalError as ex: - return None, ex - port_attr = { - 'network_id': net_spec.get(self.NETWORK), - } - fixed_ip = net_spec.get(self.FIXED_IP, None) - if fixed_ip: - port_attr['fixed_ips'] = [fixed_ip] - security_groups = net_spec.get(self.PORT_SECURITY_GROUPS, []) - if security_groups: - port_attr['security_groups'] = security_groups - try: - port = self.network(obj).port_create(**port_attr) - return port, None - except exc.InternalError as ex: - return None, ex - - def _delete_ports(self, obj, ports): - """Delete ports. - - :param obj: The node object - :param ports: A list of internal ports. - :returns: None for succeed or error for failure. - """ - for port in ports: - # remove port created by senlin - if port.get('remove', False): - try: - self.network(obj).port_delete(port['id']) - # remove floating IP created by senlin - if port.get('floating', None) and port[ - 'floating'].get('remove', False): - self.network(obj).floatingip_delete( - port['floating']['id']) - except exc.InternalError as ex: - return ex - ports.remove(port) - node_data = obj.data - node_data['internal_ports'] = ports - node_obj.Node.update(self.context, obj.id, {'data': node_data}) - - def _get_floating_ip(self, obj, fip_spec, port_id): - """Find or Create a floating IP. - - :param obj: The node object. - :param fip_spec: The parameters to create a floating ip - :param port_id: The port ID to associate with - :returns: A floating IP object and error message. - """ - floating_ip_id = fip_spec.get('floating_ip_id', None) - if floating_ip_id: - try: - fip = self.network(obj).floatingip_find(floating_ip_id) - if fip.port_id is None: - attr = {'port_id': port_id} - fip = self.network(obj).floatingip_update(fip, **attr) - return fip, None - except exc.InternalError as ex: - return None, ex - net_id = fip_spec.get(self.FLOATING_NETWORK) - fip_addr = fip_spec.get(self.FLOATING_IP) - attr = { - 'port_id': port_id, - 'floating_network_id': net_id, - } - if fip_addr: - attr.update({'floating_ip_address': fip_addr}) - try: - fip = self.network(obj).floatingip_create(**attr) - return fip, None - except exc.InternalError as ex: - return None, ex - - def _create_ports_from_properties(self, obj, networks, action_type): - """Create or find ports based on networks property. - - :param obj: The node object. - :param networks: The networks property used for node. - :param action_type: Either 'create' or 'update'. - - :returns: A list of created port's attributes. 
- """ - internal_ports = obj.data.get('internal_ports', []) - if not networks: - return [] - - for net_spec in networks: - net = self._validate_network(obj, net_spec, action_type) - # Create port - port, ex = self._get_port(obj, net) - if ex: - d_ex = self._delete_ports(obj, internal_ports) - if d_ex: - raise d_ex - else: - raise ex - port_attrs = { - 'id': port.id, - 'network_id': port.network_id, - 'security_group_ids': port.security_group_ids, - 'fixed_ips': port.fixed_ips - } - if self.PORT not in net: - port_attrs.update({'remove': True}) - # Create floating ip - if 'floating_ip_id' in net or self.FLOATING_NETWORK in net: - fip, ex = self._get_floating_ip(obj, net, port_attrs['id']) - if ex: - d_ex = self._delete_ports(obj, internal_ports) - if d_ex: - raise d_ex - else: - raise ex - port_attrs['floating'] = { - 'id': fip.id, - 'floating_ip_address': fip.floating_ip_address, - 'floating_network_id': fip.floating_network_id, - } - if self.FLOATING_NETWORK in net: - port_attrs['floating'].update({'remove': True}) - internal_ports.append(port_attrs) - if internal_ports: - node_data = obj.data - node_data.update(internal_ports=internal_ports) - node_obj.Node.update(self.context, obj.id, {'data': node_data}) - return internal_ports - - def _build_metadata(self, obj, usermeta): - """Build custom metadata for server. - - :param obj: The node object to operate on. - :return: A dictionary containing the new metadata. - """ - metadata = usermeta or {} - metadata['cluster_node_id'] = obj.id - if obj.cluster_id: - metadata['cluster_id'] = obj.cluster_id - metadata['cluster_node_index'] = str(obj.index) - - return metadata - - def _update_zone_info(self, obj, server): - """Update the actual zone placement data. - - :param obj: The node object associated with this server. - :param server: The server object returned from creation. - """ - if server.availability_zone: - placement = obj.data.get('placement', None) - if not placement: - obj.data['placement'] = {'zone': server.availability_zone} - else: - obj.data['placement'].setdefault('zone', - server.availability_zone) - # It is safe to use admin context here - ctx = context.get_admin_context() - node_obj.Node.update(ctx, obj.id, {'data': obj.data}) - - def _preprocess_user_data(self, obj, extra=None): - """Get jinja2 parameters from metadata config. - - :param obj: The node object. - :param extra: The existing parameters to be merged. - :returns: jinja2 parameters to be used. - """ - def _to_json(astr): - try: - ret = jsonutils.loads(astr) - return ret - except (ValueError, TypeError): - return astr - - extra = extra or {} - n_config = _to_json(obj.metadata.get('config', {})) - # Check node's metadata - if isinstance(n_config, dict): - extra.update(n_config) - # Check cluster's metadata - if obj.cluster_id: - ctx = context.get_service_context( - user=obj.user, project=obj.project) - cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id) - c_config = _to_json(cluster.metadata.get('config', {})) - if isinstance(c_config, dict): - extra.update(c_config) - return extra - - def do_create(self, obj): - """Create a server for the node object. - - :param obj: The node object for which a server will be created. 
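``_create_ports_from_properties`` above persists everything it made into ``node.data['internal_ports']``, and ``_delete_ports`` later consults the ``remove`` flags to decide which resources senlin owns and must tear down. The shape of one entry, with illustrative values:

```
# One record in node.data['internal_ports']; 'remove' marks
# resources senlin created (and must delete on teardown).
internal_port = {
    'id': 'PORT_ID',
    'network_id': 'NET_ID',
    'security_group_ids': ['SG_ID'],
    'fixed_ips': [{'ip_address': '10.0.0.5'}],
    'remove': True,                    # absent when the user supplied PORT
    'floating': {
        'id': 'FIP_ID',
        'floating_ip_address': '172.24.4.10',
        'floating_network_id': 'PUBLIC_NET_ID',
        'remove': True,                # FIP created from FLOATING_NETWORK
    },
}
```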
- """ - kwargs = {} - for key in self.KEYS: - # context is treated as connection parameters - if key == self.CONTEXT: - continue - - if self.properties[key] is not None: - kwargs[key] = self.properties[key] - - admin_pass = self.properties[self.ADMIN_PASS] - if admin_pass: - kwargs.pop(self.ADMIN_PASS) - kwargs['adminPass'] = admin_pass - - auto_disk_config = self.properties[self.AUTO_DISK_CONFIG] - kwargs.pop(self.AUTO_DISK_CONFIG) - kwargs['OS-DCF:diskConfig'] = 'AUTO' if auto_disk_config else 'MANUAL' - - image_ident = self.properties[self.IMAGE] - if image_ident is not None: - image = self._validate_image(obj, image_ident, 'create') - kwargs.pop(self.IMAGE) - kwargs['imageRef'] = image.id - - flavor_ident = self.properties[self.FLAVOR] - flavor = self._validate_flavor(obj, flavor_ident, 'create') - kwargs.pop(self.FLAVOR) - kwargs['flavorRef'] = flavor.id - - keypair_name = self.properties[self.KEY_NAME] - if keypair_name: - keypair = self._validate_keypair(obj, keypair_name, 'create') - kwargs['key_name'] = keypair.name - - kwargs['name'] = self.properties[self.NAME] or obj.name - - metadata = self._build_metadata(obj, self.properties[self.METADATA]) - kwargs['metadata'] = metadata - - block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] - if block_device_mapping_v2 is not None: - kwargs['block_device_mapping_v2'] = self._resolve_bdm( - block_device_mapping_v2) - - jj_vars = {} - networks = self.properties[self.NETWORKS] - if networks is not None: - ports = self._create_ports_from_properties( - obj, networks, 'create') - jj_vars['ports'] = ports - kwargs['networks'] = [ - {'port': port['id']} for port in ports] - - # Get user_data parameters from metadata - jj_vars = self._preprocess_user_data(obj, jj_vars) - - user_data = self.properties[self.USER_DATA] - if user_data is not None: - # Use jinja2 to replace variables defined in user_data - try: - jj_t = jinja2.Template(user_data) - user_data = jj_t.render(**jj_vars) - except (jinja2.exceptions.UndefinedError, ValueError) as ex: - # TODO(anyone) Handle jinja2 error - pass - ud = encodeutils.safe_encode(user_data) - kwargs['user_data'] = encodeutils.safe_decode( - base64.b64encode(ud)) - - secgroups = self.properties[self.SECURITY_GROUPS] - if secgroups: - kwargs['security_groups'] = [{'name': sg} for sg in secgroups] - - if 'placement' in obj.data: - if 'zone' in obj.data['placement']: - kwargs['availability_zone'] = obj.data['placement']['zone'] - - if 'servergroup' in obj.data['placement']: - group_id = obj.data['placement']['servergroup'] - hints = self.properties.get(self.SCHEDULER_HINTS, {}) - hints.update({'group': group_id}) - kwargs['scheduler_hints'] = hints - - server = None - resource_id = 'UNKNOWN' - try: - server = self.compute(obj).server_create(**kwargs) - self.compute(obj).wait_for_server(server.id) - # Update zone placement info if available - self._update_zone_info(obj, server) - return server.id - except exc.InternalError as ex: - if server and server.id: - resource_id = server.id - raise exc.EResourceCreation(type='server', - message=str(ex), - resource_id=resource_id) - - def do_delete(self, obj, **params): - """Delete the physical resource associated with the specified node. - - :param obj: The node object to operate on. - :param kwargs params: Optional keyword arguments for the delete - operation. - :returns: This operation always return True unless exception is - caught. - :raises: `EResourceDeletion` if interaction with compute service fails. 
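``do_create`` above renders ``user_data`` through Jinja2 and then base64-encodes it, since the Nova API expects the field pre-encoded. The encoding step, without the oslo ``encodeutils`` helpers:

```
import base64

rendered = "#!/bin/sh\necho 'hello, world'\n"

# safe_encode/safe_decode boil down to: encode the text to bytes,
# base64 it, and hand Nova the result back as a str.
user_data = base64.b64encode(rendered.encode('utf-8')).decode('utf-8')
print(user_data)   # IyEvYmluL3NoCmVjaG8gJ2hlbGxvLCB3b3JsZCcK
```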
- """ - internal_ports = obj.data.get('internal_ports', []) - if not obj.physical_id: - return True - - server_id = obj.physical_id - ignore_missing = params.get('ignore_missing', True) - force = params.get('force', False) - - try: - driver = self.compute(obj) - if force: - driver.server_force_delete(server_id, ignore_missing) - else: - driver.server_delete(server_id, ignore_missing) - driver.wait_for_server_delete(server_id) - if internal_ports: - ex = self._delete_ports(obj, internal_ports) - if ex: - raise ex - return True - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='server', id=server_id, - message=str(ex)) - - def _check_server_name(self, obj, profile): - """Check if there is a new name to be assigned to the server. - - :param obj: The node object to operate on. - :param new_profile: The new profile which may contain a name for - the server instance. - :return: A tuple consisting a boolean indicating whether the name - needs change and the server name determined. - """ - old_name = self.properties[self.NAME] or obj.name - new_name = profile.properties[self.NAME] or obj.name - if old_name == new_name: - return False, new_name - return True, new_name - - def _update_name(self, obj, new_name): - """Update the name of the server. - - :param obj: The node object to operate. - :param new_name: The new name for the server instance. - :return: ``None``. - :raises: ``EResourceUpdate``. - """ - try: - self.compute(obj).server_update(obj.physical_id, name=new_name) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _check_password(self, obj, new_profile): - """Check if the admin password has been changed in the new profile. - - :param obj: The server node to operate, not used currently. - :param new_profile: The new profile which may contain a new password - for the server instance. - :return: A tuple consisting a boolean indicating whether the password - needs a change and the password determined which could be - '' if new password is not set. - """ - old_passwd = self.properties.get(self.ADMIN_PASS) or '' - new_passwd = new_profile.properties[self.ADMIN_PASS] or '' - if old_passwd == new_passwd: - return False, new_passwd - return True, new_passwd - - def _update_password(self, obj, new_password): - """Update the admin password for the server. - - :param obj: The node object to operate. - :param new_password: The new password for the server instance. - :return: ``None``. - :raises: ``EResourceUpdate``. - """ - try: - self.compute(obj).server_change_password(obj.physical_id, - new_password) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_metadata(self, obj, new_profile): - """Update the server metadata. - - :param obj: The node object to operate on. - :param new_profile: The new profile that may contain some changes to - the metadata. - :returns: ``None`` - :raises: `EResourceUpdate`. - """ - old_meta = self._build_metadata(obj, self.properties[self.METADATA]) - new_meta = self._build_metadata(obj, - new_profile.properties[self.METADATA]) - if new_meta == old_meta: - return - - try: - self.compute(obj).server_metadata_update(obj.physical_id, new_meta) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_flavor(self, obj, new_profile): - """Update server flavor. - - :param obj: The node object to operate on. 
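``_check_server_name`` and ``_check_password`` above are deliberately side-effect free: each returns a ``(changed, value)`` pair and leaves the API call to the matching ``_update_*`` method, so ``do_update`` can skip the separate calls when a rebuild already carried the new name and password. The name check in isolation:

```
def check_name(old_profile_name, new_profile_name, node_name):
    # Either profile may leave NAME unset, in which case the node
    # name is the effective server name.
    old_name = old_profile_name or node_name
    new_name = new_profile_name or node_name
    return old_name != new_name, new_name

print(check_name(None, 'web-1', 'node-0'))   # (True, 'web-1')
print(check_name(None, None, 'node-0'))      # (False, 'node-0')
```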
- :param old_flavor: The identity of the current flavor. - :param new_flavor: The identity of the new flavor. - :returns: ``None``. - :raises: `EResourceUpdate` when operation was a failure. - """ - old_flavor = self.properties[self.FLAVOR] - new_flavor = new_profile.properties[self.FLAVOR] - cc = self.compute(obj) - oldflavor = self._validate_flavor(obj, old_flavor, 'update') - newflavor = self._validate_flavor(obj, new_flavor, 'update') - if oldflavor.id == newflavor.id: - return - - try: - cc.server_resize(obj.physical_id, newflavor.id) - cc.wait_for_server(obj.physical_id, 'VERIFY_RESIZE') - except exc.InternalError as ex: - msg = str(ex) - try: - cc.server_resize_revert(obj.physical_id) - cc.wait_for_server(obj.physical_id, 'ACTIVE') - except exc.InternalError as ex1: - msg = str(ex1) - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=msg) - - try: - cc.server_resize_confirm(obj.physical_id) - cc.wait_for_server(obj.physical_id, 'ACTIVE') - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_image(self, obj, new_profile, new_name, new_password): - """Update image used by server node. - - :param obj: The node object to operate on. - :param new_profile: The profile which may contain a new image name or - ID to use. - :param new_name: The name for the server node. - :param newn_password: The new password for the administrative account - if provided. - :returns: A boolean indicating whether the image needs an update. - :raises: ``InternalError`` if operation was a failure. - """ - old_image = self.properties[self.IMAGE] - new_image = new_profile.properties[self.IMAGE] - if not new_image: - msg = _("Updating Nova server with image set to None is not " - "supported by Nova") - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=msg) - # check the new image first - img_new = self._validate_image(obj, new_image, reason='update') - new_image_id = img_new.id - - driver = self.compute(obj) - if old_image: - img_old = self._validate_image(obj, old_image, reason='update') - old_image_id = img_old.id - else: - try: - server = driver.server_get(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - # Still, this 'old_image_id' could be empty, but it doesn't matter - # because the comparison below would fail if that is the case - old_image_id = server.image.get('id', None) - - if new_image_id == old_image_id: - return False - - try: - driver.server_rebuild(obj.physical_id, new_image_id, - new_name, new_password) - driver.wait_for_server(obj.physical_id, 'ACTIVE') - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - return True - - def _update_network_add_port(self, obj, networks): - """Create new interfaces for the server node. - - :param obj: The node object to operate. - :param networks: A list containing information about new network - interfaces to be created. - :returns: ``None``. - :raises: ``EResourceUpdate`` if interaction with drivers failed. 
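``_update_flavor`` above performs Nova's two-phase resize: resize, wait for ``VERIFY_RESIZE``, then confirm; on failure it reverts and waits for ``ACTIVE`` again before reporting the error. The control flow as a sketch against a hypothetical compute driver ``cc`` with the same method names:

```
def resize_server(cc, server_id, new_flavor_id):
    # Phase 1: request the resize and wait for verification state.
    try:
        cc.server_resize(server_id, new_flavor_id)
        cc.wait_for_server(server_id, 'VERIFY_RESIZE')
    except Exception as ex:
        # Roll back so the server is not left half-resized.
        cc.server_resize_revert(server_id)
        cc.wait_for_server(server_id, 'ACTIVE')
        raise RuntimeError('resize failed: %s' % ex)

    # Phase 2: confirm, releasing the old flavor's resources.
    cc.server_resize_confirm(server_id)
    cc.wait_for_server(server_id, 'ACTIVE')
```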
- """ - cc = self.compute(obj) - try: - server = cc.server_get(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - ports = self._create_ports_from_properties( - obj, networks, 'update') - for port in ports: - params = {'port': port['id']} - try: - cc.server_interface_create(server, **params) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', - id=obj.physical_id, - message=str(ex)) - - def _find_port_by_net_spec(self, obj, net_spec, ports): - """Find existing ports match with specific network properties. - - :param obj: The node object. - :param net_spec: Network property of this profile. - :param ports: A list of ports which attached to this server. - :returns: A list of candidate ports matching this network spec. - """ - # TODO(anyone): handle security_groups - net = self._validate_network(obj, net_spec, 'update') - selected_ports = [] - for p in ports: - floating = p.get('floating', {}) - floating_network = net.get(self.FLOATING_NETWORK, None) - if floating_network and floating.get( - 'floating_network_id') != floating_network: - continue - floating_ip_address = net.get(self.FLOATING_IP, None) - if floating_ip_address and floating.get( - 'floating_ip_address') != floating_ip_address: - continue - # If network properties didn't contain floating ip, - # then we should better not make a port with floating ip - # as candidate. - if (floating and not floating_network and not floating_ip_address): - continue - port_id = net.get(self.PORT, None) - if port_id and p['id'] != port_id: - continue - fixed_ip = net.get(self.FIXED_IP, None) - if fixed_ip: - fixed_ips = [ff['ip_address'] for ff in p['fixed_ips']] - if fixed_ip not in fixed_ips: - continue - network = net.get(self.NETWORK, None) - if network: - net_id = self.network(obj).network_get(network).id - if p['network_id'] != net_id: - continue - selected_ports.append(p) - return selected_ports - - def _update_network_remove_port(self, obj, networks): - """Delete existing interfaces from the node. - - :param obj: The node object to operate. - :param networks: A list containing information about network - interfaces to be created. - :returns: ``None`` - :raises: ``EResourceUpdate`` - """ - cc = self.compute(obj) - nc = self.network(obj) - internal_ports = obj.data.get('internal_ports', []) - - for n in networks: - candidate_ports = self._find_port_by_net_spec( - obj, n, internal_ports) - port = candidate_ports[0] - try: - # Detach port from server - cc.server_interface_delete(port['id'], obj.physical_id) - # delete port if created by senlin - if port.get('remove', False): - nc.port_delete(port['id'], ignore_missing=True) - # delete floating IP if created by senlin - if (port.get('floating', None) and - port['floating'].get('remove', False)): - nc.floatingip_delete(port['floating']['id'], - ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - internal_ports.remove(port) - obj.data['internal_ports'] = internal_ports - node_obj.Node.update(self.context, obj.id, {'data': obj.data}) - - def _update_network(self, obj, new_profile): - """Updating server network interfaces. - - :param obj: The node object to operate. - :param new_profile: The new profile which may contain new network - settings. - :return: ``None`` - :raises: ``EResourceUpdate`` if there are driver failures. 
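``_find_port_by_net_spec`` above narrows the saved ``internal_ports`` down to candidates that agree with every property the spec mentions; absent keys constrain nothing. The core of that filter as a sketch with plain dicts, assuming network names are already resolved to IDs:

```
def port_matches(port, spec):
    if 'port' in spec and port['id'] != spec['port']:
        return False
    if 'fixed_ip' in spec:
        ips = [f['ip_address'] for f in port.get('fixed_ips', [])]
        if spec['fixed_ip'] not in ips:
            return False
    if 'network_id' in spec and port['network_id'] != spec['network_id']:
        return False
    return True

port = {'id': 'p1', 'network_id': 'n1',
        'fixed_ips': [{'ip_address': '10.0.0.5'}]}
print(port_matches(port, {'fixed_ip': '10.0.0.5'}))   # True
print(port_matches(port, {'port': 'p2'}))             # False
```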
- """ - networks_current = self.properties[self.NETWORKS] - networks_create = new_profile.properties[self.NETWORKS] - networks_delete = copy.deepcopy(networks_current) - for network in networks_current: - if network in networks_create: - networks_create.remove(network) - networks_delete.remove(network) - - # Detach some existing interfaces - if networks_delete: - self._update_network_remove_port(obj, networks_delete) - - # Attach new interfaces - if networks_create: - self._update_network_add_port(obj, networks_create) - return - - def do_update(self, obj, new_profile=None, **params): - """Perform update on the server. - - :param obj: the server to operate on - :param new_profile: the new profile for the server. - :param params: a dictionary of optional parameters. - :returns: True if update was successful or False otherwise. - :raises: `EResourceUpdate` if operation fails. - """ - self.server_id = obj.physical_id - if not self.server_id: - return False - - if not new_profile: - return False - - if not self.validate_for_update(new_profile): - return False - - name_changed, new_name = self._check_server_name(obj, new_profile) - passwd_changed, new_passwd = self._check_password(obj, new_profile) - # Update server image: may have side effect of changing server name - # and/or admin password - image_changed = self._update_image(obj, new_profile, new_name, - new_passwd) - if not image_changed: - # we do this separately only when rebuild wasn't performed - if name_changed: - self._update_name(obj, new_name) - if passwd_changed: - self._update_password(obj, new_passwd) - - # Update server flavor: note that flavor is a required property - self._update_flavor(obj, new_profile) - self._update_network(obj, new_profile) - - # TODO(Yanyan Hu): Update block_device properties - # Update server metadata - self._update_metadata(obj, new_profile) - - return True - - def do_get_details(self, obj): - known_keys = { - 'OS-DCF:diskConfig', - 'OS-EXT-AZ:availability_zone', - 'OS-EXT-STS:power_state', - 'OS-EXT-STS:vm_state', - 'accessIPv4', - 'accessIPv6', - 'config_drive', - 'created', - 'hostId', - 'id', - 'key_name', - 'locked', - 'metadata', - 'name', - 'os-extended-volumes:volumes_attached', - 'progress', - 'status', - 'updated' - } - if obj.physical_id is None or obj.physical_id == '': - return {} - - driver = self.compute(obj) - try: - server = driver.server_get(obj.physical_id) - except exc.InternalError as ex: - return { - 'Error': { - 'code': ex.code, - 'message': str(ex) - } - } - - if server is None: - return {} - server_data = server.to_dict() - details = { - 'image': server_data['image']['id'], - 'flavor': server_data['flavor']['id'], - } - for key in known_keys: - if key in server_data: - details[key] = server_data[key] - - # process special keys like 'OS-EXT-STS:task_state': these keys have - # a default value '-' when not existing - special_keys = [ - 'OS-EXT-STS:task_state', - 'OS-SRV-USG:launched_at', - 'OS-SRV-USG:terminated_at', - ] - for key in special_keys: - if key in server_data: - val = server_data[key] - details[key] = val if val else '-' - - # process network addresses - details['addresses'] = copy.deepcopy(server_data['addresses']) - - # process security groups - sgroups = [] - if 'security_groups' in server_data: - for sg in server_data['security_groups']: - sgroups.append(sg['name']) - if len(sgroups) == 0: - details['security_groups'] = '' - elif len(sgroups) == 1: - details['security_groups'] = sgroups[0] - else: - details['security_groups'] = sgroups - - return dict((k, 
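``_update_network`` above derives the attach and detach sets by simple list membership between the old and new ``networks`` properties: anything present in both profiles is untouched, and the leftovers drive the port operations. The same diff in isolation:

```
import copy

current = [{'network': 'private'}, {'port': 'p1'}]
desired = [{'network': 'private'}, {'network': 'mgmt'}]

to_create = copy.deepcopy(desired)
to_delete = copy.deepcopy(current)
for net in current:
    if net in desired:
        to_create.remove(net)
        to_delete.remove(net)

print(to_create)   # [{'network': 'mgmt'}]  -> interfaces to attach
print(to_delete)   # [{'port': 'p1'}]       -> interfaces to detach
```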
details[k]) for k in sorted(details)) - - def do_join(self, obj, cluster_id): - if not obj.physical_id: - return False - - driver = self.compute(obj) - metadata = driver.server_metadata_get(obj.physical_id) or {} - metadata['cluster_id'] = cluster_id - metadata['cluster_node_index'] = str(obj.index) - driver.server_metadata_update(obj.physical_id, metadata) - return super(ServerProfile, self).do_join(obj, cluster_id) - - def do_leave(self, obj): - if not obj.physical_id: - return False - - keys = ['cluster_id', 'cluster_node_index'] - self.compute(obj).server_metadata_delete(obj.physical_id, keys) - return super(ServerProfile, self).do_leave(obj) - - def do_check(self, obj): - if not obj.physical_id: - return False - - try: - server = self.compute(obj).server_get(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='checking', type='server', - id=obj.physical_id, - message=str(ex)) - - if (server is None or server.status != 'ACTIVE'): - return False - - return True - - def do_recover(self, obj, **options): - """Handler for recover operation. - - :param obj: The node object. - :param dict options: A list for operations each of which has a name - and optionally a map from parameter to values. - """ - operation = options.get('operation', None) - - if operation and not isinstance(operation, str): - operation = operation[0] - - op_name = operation['name'] - if op_name.upper() != consts.RECOVER_RECREATE: - op_params = operation.get('params', {}) - if op_name.lower() not in self.OP_NAMES: - LOG.error("The operation '%s' is not supported", op_name) - return False - - method = getattr(self, "handle_" + op_name.lower()) - return method(obj, **op_params) - - return super(ServerProfile, self).do_recover(obj, **options) - - def handle_reboot(self, obj, **options): - """Handler for the reboot operation.""" - if not obj.physical_id: - return False - - reboot_type = options.get(self.REBOOT_TYPE, self.REBOOT_SOFT) - if (not isinstance(reboot_type, str) or - reboot_type not in self.REBOOT_TYPES): - return False - - self.compute(obj).server_reboot(obj.physical_id, reboot_type) - self.compute(obj).wait_for_server(obj.physical_id, 'ACTIVE') - return True - - def handle_rebuild(self, obj, **options): - if not obj.physical_id: - return False - - server_id = obj.physical_id - driver = self.compute(obj) - try: - server = driver.server_get(server_id) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='rebuilding', type='server', - id=server_id, - message=str(ex)) - - if server is None or server.image is None: - return False - - image_id = server.image['id'] - admin_pass = self.properties.get(self.ADMIN_PASS) - try: - driver.server_rebuild(server_id, image_id, - self.properties.get(self.NAME), - admin_pass) - driver.wait_for_server(server_id, 'ACTIVE') - except exc.InternalError as ex: - raise exc.EResourceOperation(op='rebuilding', type='server', - id=server_id, - message=str(ex)) - return True - - def handle_change_password(self, obj, **options): - """Handler for the change_password operation.""" - if not obj.physical_id: - return False - - password = options.get(self.ADMIN_PASSWORD, None) - if (password is None or not isinstance(password, str)): - return False - - self.compute(obj).server_change_password(obj.physical_id, password) - return True diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index 1026aeb3e..000000000 --- a/devstack/README.rst +++ /dev/null @@ -1,21 +0,0 @@ -=========================== -Enabling senlin in DevStack 
-=========================== - -1. Download DevStack:: - - $ git clone https://git.openstack.org/openstack-dev/devstack - $ cd devstack - -2. Add following repo as external repositories into your ``local.conf`` file:: - - [[local|localrc]] - #Enable senlin - enable_plugin senlin https://git.openstack.org/openstack/senlin - #Enable senlin-dashboard - enable_plugin senlin-dashboard https://git.openstack.org/openstack/senlin-dashboard - -Optionally, you can add a line ``SENLIN_USE_MOD_WSGI=True`` to the same ``local.conf`` -file if you prefer running the Senlin API service under Apache. - -3. Run ``./stack.sh``. diff --git a/devstack/files/apache-senlin-api.template b/devstack/files/apache-senlin-api.template deleted file mode 100644 index 89b266925..000000000 --- a/devstack/files/apache-senlin-api.template +++ /dev/null @@ -1,28 +0,0 @@ - - Require all granted - - - - WSGIDaemonProcess senlin-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup senlin-api - WSGIScriptAlias / %SENLIN_BIN_DIR%/senlin-wsgi-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - AllowEncodedSlashes on - = 2.4> - ErrorLogFormat "%M" - - ErrorLog /var/log/%APACHE_NAME%/senlin-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - -Alias /cluster %SENLIN_BIN_DIR%/senlin-wsgi-api - - SetHandler wsgi-script - Options +ExecCGI - WSGIProcessGroup senlin-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/devstack/lib/senlin b/devstack/lib/senlin deleted file mode 100644 index 97ccfbd6e..000000000 --- a/devstack/lib/senlin +++ /dev/null @@ -1,329 +0,0 @@ -#!/bin/bash -# -# lib/senlin -# Install and start **Senlin** service - -# To enable, add the following to local.conf -# -# [[local|localrc]] -# enable_plugin senlin https://git.openstack.org/openstack/senlin - -# Dependencies: -# -# - functions -# - HORIZON_DIR - -# stack.sh -# --------- -# - config_senlin_dashboard -# - configure_senlin -# - cleanup_senlin -# - cleanup_senlin_dashboard -# - create_senlin_cache_dir -# - create_senlin_accounts -# - init_senlin -# - install_senlinclient -# - install_senlin -# - install_senlin_dashboard -# - is_senlin_enabled -# - start_senlin -# - stop_senlin - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# set up default -SENLIN_AUTH_CACHE_DIR=${SENLIN_AUTH_CACHE_DIR:-/var/cache/senlin} -SENLIN_CONF_DIR=/etc/senlin -SENLIN_CONF=$SENLIN_CONF_DIR/senlin.conf -SENLIN_API_HOST=${SENLIN_API_HOST:-$SERVICE_HOST} -SENLIN_WSGI_MODE=${SENLIN_WSGI_MODE:-"uwsgi"} - -SENLIN_DIR=$DEST/senlin -if [[ ${USE_VENV} = True ]]; then - PROJECT_VENV["senlin"]=${SENLIN_DIR}.venv - SENLIN_BIN_DIR=${PROJECT_VENV["senlin"]}/bin -else - SENLIN_BIN_DIR=$(get_python_exec_prefix) -fi -SENLIN_REPO=${SENLIN_REPO:-${GIT_BASE}/openstack/senlin.git} -SENLIN_BRANCH=${SENLIN_BRANCH:-master} - -SENLINCLIENT_DIR=$DEST/python-senlinclient -SENLINCLIENT_REPO=${SENLINCLIENT_REPO:-${GIT_BASE}/openstack/python-senlinclient.git} -SENLINCLIENT_BRANCH=${SENLINCLIENT_BRANCH:-master} - -SENLIN_DASHBOARD_DIR=$DEST/senlin-dashboard -SENLIN_DASHBOARD_REPO=${SENLIN_DASHBOARD_REPO:-${GIT_BASE}/openstack/senlin-dashboard.git} -SENLIN_DASHBOARD_BRANCH=${SENLIN_DASHBOARD_BRANCH:-master} - -SENLIN_UWSGI=$SENLIN_BIN_DIR/senlin-wsgi-api -SENLIN_UWSGI_CONF=$SENLIN_CONF_DIR/senlin-api-uwsgi.ini - -if is_service_enabled tls-proxy; then - SENLIN_SERVICE_PROTOCOL="https" -fi - -SENLIN_SERVICE_PROTOCOL=${SENLIN_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -# 
Functions -# --------- - -# Test if any Senlin services are enabled -function is_senlin_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"sl-" ]] && return 0 - return 1 -} - -# cleanup_senlin() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_senlin { - sudo rm -f $(apache_site_config_for senlin-api) - remove_uwsgi_config "$SENLIN_UWSGI_CONF" "$SENLIN_UWSGI" - sudo rm -rf $SENLIN_AUTH_CACHE_DIR - sudo rm -rf $SENLIN_CONF_DIR -} - -# configure_senlin() - Set config files, create data dirs, etc -function configure_senlin { - if [[ ! -d $SENLIN_CONF_DIR ]]; then - sudo mkdir -p $SENLIN_CONF_DIR - fi - - sudo chown $STACK_USER $SENLIN_CONF_DIR - - sudo install -d -o $STACK_USER $SENLIN_CONF_DIR - - SENLIN_API_PASTE_FILE=$SENLIN_CONF_DIR/api-paste.ini - - cp $SENLIN_DIR/etc/senlin/api-paste.ini $SENLIN_API_PASTE_FILE - - # common options - iniset $SENLIN_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - iniset $SENLIN_CONF DEFAULT auth_encryption_key $(generate_hex_string 16) - iniset $SENLIN_CONF DEFAULT default_region_name "$REGION_NAME" - - - if [ "$USE_SYSTEMD" != "False" ]; then - setup_systemd_logging $SENLIN_CONF - fi - - if [ "$LOG_COLOR" == "True" ] && [ "$USE_SYSTEMD" == "False" ]; then - # Add color to logging output - setup_colorized_logging $SENLIN_CONF DEFAULT - fi - - # rpc - iniset_rpc_backend senlin $SENLIN_CONF - - # Database connection - iniset $SENLIN_CONF database connection `database_connection_url senlin` - - # Keystone authtoken middleware - #configure_auth_token_middleware $SENLIN_CONF senlin $SENLIN_AUTH_CACHE_DIR - iniset $SENLIN_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE - iniset $SENLIN_CONF keystone_authtoken auth_url $KEYSTONE_AUTH_URI - iniset $SENLIN_CONF keystone_authtoken username senlin - iniset $SENLIN_CONF keystone_authtoken password $SERVICE_PASSWORD - iniset $SENLIN_CONF keystone_authtoken project_name $SERVICE_TENANT_NAME - iniset $SENLIN_CONF keystone_authtoken project_domain_name Default - iniset $SENLIN_CONF keystone_authtoken user_domain_name Default - iniset $SENLIN_CONF keystone_authtoken auth_type password - iniset $SENLIN_CONF keystone_authtoken service_token_roles_required True - iniset $SENLIN_CONF keystone_authtoken interface public - - # Senlin service credentials - iniset $SENLIN_CONF authentication auth_url $KEYSTONE_AUTH_URI/v3 - iniset $SENLIN_CONF authentication service_username senlin - iniset $SENLIN_CONF authentication service_password $SERVICE_PASSWORD - iniset $SENLIN_CONF authentication service_project_name $SERVICE_TENANT_NAME - - # Senlin Conductor options - iniset $SENLIN_CONF conductor workers $API_WORKERS - - # Senlin Conductor options - iniset $SENLIN_CONF engine workers $API_WORKERS - - # Senlin Health-Manager options - iniset $SENLIN_CONF health_manager workers $API_WORKERS - - # Zaqar options for message receiver - iniset $SENLIN_CONF zaqar auth_type password - iniset $SENLIN_CONF zaqar username zaqar - iniset $SENLIN_CONF zaqar password $SERVICE_PASSWORD - iniset $SENLIN_CONF zaqar project_name $SERVICE_TENANT_NAME - iniset $SENLIN_CONF zaqar auth_url $KEYSTONE_AUTH_URI/v3 - iniset $SENLIN_CONF zaqar user_domain_name Default - iniset $SENLIN_CONF zaqar project_domain_name Default - - if [[ "$SENLIN_WSGI_MODE" == "uwsgi" ]]; then - write_uwsgi_config "$SENLIN_UWSGI_CONF" "$SENLIN_UWSGI" "/cluster" - else - _config_senlin_apache_wsgi - fi -} - -# _config_senlin_apache_wsgi() - Configure mod_wsgi -function _config_senlin_apache_wsgi { - local 
senlin_api_apache_conf - local venv_path="" - local senlin_bin_dir="" - senlin_bin_dir=$(get_python_exec_prefix) - senlin_api_apache_conf=$(apache_site_config_for senlin-api) - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["senlin"]}/lib/$(python_version)/site-packages" - senlin_bin_dir=${PROJECT_VENV["senlin"]}/bin - fi - - sudo cp $SENLIN_DIR/devstack/files/apache-senlin-api.template $senlin_api_apache_conf - sudo sed -e " - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%SENLIN_BIN_DIR%|$SENLIN_BIN_DIR|g; - s|%SSLENGINE%|$senlin_ssl|g; - s|%SSLCERTFILE%|$senlin_certfile|g; - s|%SSLKEYFILE%|$senlin_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g; - s|%APIWORKERS%|$API_WORKERS|g; - " -i $senlin_api_apache_conf -} - -# init_senlin() - Initialize database -function init_senlin { - # (re)create senlin database - recreate_database senlin utf8 - - if [[ "$USE_SQLALCHEMY_LATEST" == "True" ]]; then - pip3 install --upgrade alembic sqlalchemy - fi - - $SENLIN_BIN_DIR/senlin-manage db_sync - create_senlin_cache_dir -} - -# create_senlin_cache_dir() - Part of the init_senlin() process -function create_senlin_cache_dir { - # Create cache dirs - sudo mkdir -p $SENLIN_AUTH_CACHE_DIR - sudo install -d -o $STACK_USER $SENLIN_AUTH_CACHE_DIR -} - -# install_senlinclient() - Collect source and prepare -function install_senlinclient { - if use_library_from_git "python-senlinclient"; then - git_clone $SENLINCLIENT_REPO $SENLINCLIENT_DIR $SENLINCLIENT_BRANCH - setup_develop $SENLINCLIENT_DIR - else - pip_install --upgrade python-senlinclient - fi -} - -# install_senlin_dashboard() - Collect source and prepare -function install_senlin_dashboard { - # NOTE(Liuqing): workaround for devstack bug: 1540328 - # https://bugs.launchpad.net/devstack/+bug/1540328 - # where devstack install 'test-requirements' but should not do it - # for senlin-dashboard project as it installs Horizon from url. - # Remove following two 'mv' commands when mentioned bug is fixed. 
- if use_library_from_git "senlin-dashboard"; then - git_clone $SENLIN_DASHBOARD_REPO $SENLIN_DASHBOARD_DIR $SENLIN_DASHBOARD_BRANCH - mv $SENLIN_DASHBOARD_DIR/test-requirements.txt $SENLIN_DASHBOARD_DIR/_test-requirements.txt - setup_develop $SENLIN_DASHBOARD_DIR - mv $SENLIN_DASHBOARD_DIR/_test-requirements.txt $SENLIN_DASHBOARD_DIR/test-requirements.txt - else - pip_install --upgrade senlin-dashboard - fi -} - -# configure_senlin_dashboard() - Set config files -function config_senlin_dashboard { - # Install Senlin Dashboard as plugin for Horizon - ln -sf $SENLIN_DASHBOARD_DIR/senlin_dashboard/enabled/_50_senlin.py $HORIZON_DIR/openstack_dashboard/local/enabled/_50_senlin.py - # Enable senlin policy - ln -sf $SENLIN_DASHBOARD_DIR/senlin_dashboard/conf/senlin_policy.json $HORIZON_DIR/openstack_dashboard/conf/senlin_policy.json -} - -# cleanup_senlin_dashboard() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_senlin_dashboard { - sudo rm -rf $HORIZON_DIR/openstack_dashboard/local/enabled/_50_senlin.py - sudo rm -rf $HORIZON_DIR/openstack_dashboard/conf/senlin_policy.json -} - -# install_senlin() - Collect source and prepare -function install_senlin { - if [[ "$SENLIN_WSGI_MODE" == "uwsgi" ]]; then - install_apache_uwsgi - else - install_apache_wsgi - fi - - git_clone $SENLIN_REPO $SENLIN_DIR $SENLIN_BRANCH - setup_develop $SENLIN_DIR -} - -# start_senlin() - Start running processes, including screen -function start_senlin { - run_process sl-eng "$SENLIN_BIN_DIR/senlin-engine --config-file=$SENLIN_CONF" - run_process sl-conductor "$SENLIN_BIN_DIR/senlin-conductor --config-file=$SENLIN_CONF" - run_process sl-health-manager "$SENLIN_BIN_DIR/senlin-health-manager --config-file=$SENLIN_CONF" - - if [[ "$SENLIN_WSGI_MODE" == "uwsgi" ]]; then - run_process sl-api "$(which uwsgi) --procname-prefix senlin-api --ini $SENLIN_UWSGI_CONF" - else - enable_apache_site senlin-api - restart_apache_server - tail_log senlin-api /var/log/$APACHE_NAME/senlin-api.log - fi - - echo "Waiting for senlin-api to start..." - if ! 
wait_for_service $SERVICE_TIMEOUT $SENLIN_SERVICE_PROTOCOL://$SENLIN_API_HOST/cluster; then - die $LINENO "senlin-api did not start" - fi -} - -# stop_senlin() - Stop running processes -function stop_senlin { - # Kill the screen windows - stop_process sl-eng - stop_process sl-conductor - stop_process sl-health-manager - - if [[ "$SENLIN_WSGI_MODE" == "uwsgi" ]]; then - stop_process sl-api - else - disable_apache_site senlin-api - restart_apache_server - fi -} - -# create_senlin_accounts() - Set up common required senlin accounts -function create_senlin_accounts { - create_service_user "senlin" - - local senlin_api_url="$SENLIN_SERVICE_PROTOCOL://$SENLIN_API_HOST/cluster" - - get_or_create_service "senlin" "clustering" "Senlin Clustering Service" - get_or_create_endpoint "clustering" \ - "$REGION_NAME" \ - "$senlin_api_url" \ - "$senlin_api_url" \ - "$senlin_api_url" - - # get or add 'service' role to 'senlin' on 'demo' project - get_or_add_user_project_role "service" "senlin" "demo" -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100644 index d5b9bad91..000000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,58 +0,0 @@ -# senlin.sh - Devstack extras script to install senlin - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set -o xtrace - -echo_summary "senlin's plugin.sh was called..." -. $DEST/senlin/devstack/lib/senlin -(set -o posix; set) - -if is_service_enabled sl-api sl-eng; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing senlin" - install_senlin - echo_summary "Installing senlinclient" - install_senlinclient - if is_service_enabled horizon; then - echo_summary "Installing senlin dashboard" - install_senlin_dashboard - fi - cleanup_senlin - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring senlin" - configure_senlin - - if is_service_enabled horizon; then - echo_summary "Configuring senlin dashboard" - config_senlin_dashboard - fi - - if is_service_enabled key; then - create_senlin_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize senlin - init_senlin - - # Start the senlin API and senlin taskmgr components - echo_summary "Starting senlin" - start_senlin - fi - - if [[ "$1" == "unstack" ]]; then - stop_senlin - fi - - if [[ "$1" == "clean" ]]; then - cleanup_senlin - - if is_service_enabled horizon; then - cleanup_senlin_dashboard - fi - fi -fi - -# Restore xtrace -$XTRACE diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index b97367d4d..000000000 --- a/devstack/settings +++ /dev/null @@ -1,6 +0,0 @@ -# Devstack settings - -# We have to add Senlin to enabled services for screen_it to work -# It consists of 2 parts: sl-api (API), sl-eng (Engine). - -enable_service sl-api sl-eng sl-conductor sl-health-manager diff --git a/doc/.gitignore b/doc/.gitignore deleted file mode 100644 index 6438f1c05..000000000 --- a/doc/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -target/ -build/ diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index 32b2cee35..000000000 --- a/doc/Makefile +++ /dev/null @@ -1,159 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " xml to make Docutils-native XML files" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Heat.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Heat.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/Heat" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Heat" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The xml files are in $(BUILDDIR)/xml." 
- -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/README.rst b/doc/README.rst deleted file mode 100644 index fa627d005..000000000 --- a/doc/README.rst +++ /dev/null @@ -1,55 +0,0 @@ -=========================== -Building the developer docs -=========================== - -Dependencies -============ - -You'll need to install the Python *Sphinx* package and the *oslosphinx* -package: - -:: - - sudo pip install sphinx oslosphinx - -If you are using a virtualenv, you'll need to install them in the -virtualenv. - -Get Help -======== - -Just type make to get help: - -:: - - make - -It will list the available build targets. - -Build Doc -========= - -To build the man pages: - -:: - - make man - -To build the developer documentation as HTML: - -:: - - make html - -Type *make* for more formats. - -Test Doc -======== - -If you modify doc files, you can type: - -:: - - make doctest - -to check whether the format has problems. diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 7bd856e2b..000000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later.
-# this is required for the docs build jobs -openstackdocstheme>=2.2.1 # Apache-2.0 -os-api-ref>=1.4.0 # Apache-2.0 -sphinx>=2.0.0,!=2.1.0 # BSD -reno>=3.1.0 # Apache-2.0 diff --git a/doc/source/admin/authentication.rst b/doc/source/admin/authentication.rst deleted file mode 100644 index 908ff109e..000000000 --- a/doc/source/admin/authentication.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -============== -Authentication -============== - -(TBD) - -This document describes the authentication model used by Senlin. diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 5aa1b21b7..000000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================== -Administering Senlin -==================== - -.. toctree:: - :maxdepth: 1 - - authentication diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 64b63374b..000000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,109 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - - -sys.path.insert(0, os.path.abspath('../..')) - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) - -sys.path.insert(0, ROOT) -sys.path.insert(0, BASE_DIR) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.todo', - 'sphinx.ext.graphviz', - 'sphinx.ext.intersphinx', - 'openstackdocstheme', - 'oslo_config.sphinxext', - 'oslo_policy.sphinxext', - 'oslo_policy.sphinxpolicygen', - 'ext.resources' -] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/senlin' -openstackdocs_bug_project = 'senlin' -openstackdocs_bug_tag = '' - -policy_generator_config_file = ( - '../../tools/policy-generator.conf' -) -sample_policy_basename = '_static/senlin' - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'senlin' -copyright = '2015, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# -- Options for HTML output -------------------------------------------------- - -# html_static_path = ['static'] - -# The theme to use for HTML and HTML Help pages. See the documentation for a -# list of builtin themes. -html_theme = 'openstackdocs' - -# Add any paths that contain custom themes here, relative to this directory -# html_theme_path = [] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - '%s Documentation' % project, - 'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. -# intersphinx_mapping = {'http://docs.python.org/': None} - -suppress_warnings = ['ref.option'] - -[extensions] -# todo_include_todos = True diff --git a/doc/source/configuration/config.rst b/doc/source/configuration/config.rst deleted file mode 100644 index 3ecfafc3e..000000000 --- a/doc/source/configuration/config.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -===================== -Configuration Options -===================== - -Senlin uses `oslo.config` to define and manage configuration options to -allow the deployer to control many aspects of the service API and the service -engine. - -.. show-options:: senlin.conf - -Options -======= - -.. currentmodule:: senlin.conf.opts - -.. 
autofunction:: list_opts \ No newline at end of file diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst deleted file mode 100644 index 1debb5324..000000000 --- a/doc/source/configuration/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -==================== -Senlin Configuration -==================== - -.. toctree:: - :maxdepth: 2 - - config - policy - sample-policy-yaml diff --git a/doc/source/configuration/policy.rst b/doc/source/configuration/policy.rst deleted file mode 100644 index c4d4a7bfe..000000000 --- a/doc/source/configuration/policy.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -======================================= -Senlin Sample Policy Configuration File -======================================= - -.. warning:: - - JSON formatted policy file is deprecated since Senlin 11.0.0 (Wallaby). - This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing - JSON-formatted policy file to YAML in a backward-compatible way. - -.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html - -The following is an overview of all available access policies in Senlin. -For a sample configuration file, refer to :doc:`sample-policy-yaml`. - -.. show-policy:: - :config-file: ../../tools/policy-generator.conf diff --git a/doc/source/configuration/sample-policy-yaml.rst b/doc/source/configuration/sample-policy-yaml.rst deleted file mode 100644 index e397b81f7..000000000 --- a/doc/source/configuration/sample-policy-yaml.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -=========== -policy.yaml -=========== - -.. warning:: - - JSON formatted policy file is deprecated since Senlin 11.0.0 (Wallaby). - This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing - JSON-formatted policy file to YAML in a backward-compatible way. - -.. 
__: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html - -Use the ``policy.yaml`` file to define additional access controls that will be -applied to Senlin: - -.. literalinclude:: ../_static/senlin.policy.yaml.sample diff --git a/doc/source/contributor/action.rst b/doc/source/contributor/action.rst deleted file mode 100644 index ba87d1bd4..000000000 --- a/doc/source/contributor/action.rst +++ /dev/null @@ -1,317 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======= -Actions -======= - -An action is an abstraction of some logic that can be executed by a worker -thread. Most of the operations supported by Senlin are executed asynchronously, -which means they are queued into the database and then picked up by a worker -thread for execution. - -Currently, Senlin only supports the builtin actions listed below. In the future, we -may evolve to support user-defined actions (UDAs). A user-defined action may -carry a Shell script to be executed on a target Nova server, or a Heat -SoftwareConfig to be deployed on a stack, for example. The following builtin -actions are supported at the time of this design: - -- ``CLUSTER_CREATE``: An action for creating a cluster; -- ``CLUSTER_DELETE``: An action for deleting a cluster; -- ``CLUSTER_UPDATE``: An action for updating a cluster; -- ``CLUSTER_ADD_NODES``: An action for adding existing nodes to a cluster; -- ``CLUSTER_DEL_NODES``: An action for removing nodes from a cluster; -- ``CLUSTER_REPLACE_NODES``: An action for replacing nodes in a cluster; -- ``CLUSTER_RESIZE``: An action for adjusting the size of a cluster; -- ``CLUSTER_SCALE_IN``: An action to shrink the size of a cluster by removing - nodes from the cluster; -- ``CLUSTER_SCALE_OUT``: An action to extend the size of a cluster by creating - new nodes using the ``profile_id`` of the cluster; -- ``CLUSTER_ATTACH_POLICY``: An action to attach a policy to a cluster; -- ``CLUSTER_DETACH_POLICY``: An action to detach a policy from a cluster; -- ``CLUSTER_UPDATE_POLICY``: An action to update the properties of a binding - between a cluster and a policy; -- ``CLUSTER_CHECK``: An action for checking a cluster and executing ``NODE_CHECK`` - for all its nodes; -- ``CLUSTER_RECOVER``: An action for recovering a cluster and executing - ``NODE_RECOVER`` for all the nodes in 'ERROR' status; -- ``NODE_CREATE``: An action for creating a new node; -- ``NODE_DELETE``: An action for deleting an existing node; -- ``NODE_UPDATE``: An action for updating the properties of an existing node; -- ``NODE_JOIN``: An action for joining a node to an existing cluster; -- ``NODE_LEAVE``: An action for a node to leave its current owning cluster; -- ``NODE_CHECK``: An action for checking a node to see if its physical node is - 'ACTIVE' and updating its status to 'ERROR' if not; -- ``NODE_RECOVER``: An action for recovering a node; - - -Action Properties -~~~~~~~~~~~~~~~~~ - -An action has the following properties when created: - -- ``id``: a globally unique ID for the action object; -- ``name``: a string
representation of the action name which might be - generated automatically for actions derived from other operations; -- ``context``: a dictionary that contains the calling context that will be - used by the engine when executing the action. Contents in this dictionary - may contain sensitive information such as user credentials. -- ``action``: a text property that contains the action body to be executed. - Currently, this property only contains the name of a builtin action. In - the future, we will provide a structured definition of actions for UDAs. -- ``target``: the UUID of an object (e.g. a cluster, a node or a policy) to - be operated on; -- ``cause``: a string indicating the reason why this action was created. The - purpose of this property is for the engine to check whether a new lock should - be acquired before operating an object. Valid values for this property - include: - - * ``RPC Request``: this indicates that the action was created upon receiving - an RPC request from the Senlin API, which means a lock is likely needed; - * ``Derived Action``: this indicates that the action was created internally - as part of the execution path of another action, which means a lock might - have been acquired; - -- ``owner``: the UUID of a worker thread that currently "owns" this action and - is responsible for executing it. -- ``interval``: the interval (in seconds) for repetitive actions, a value of 0 - means that the action won't be repeated; -- ``start_time``: timestamp when the action was last started. This field is - provided for action execution timeout detection; -- ``stop_time``: timestamp when the action was stopped. This field is provided - for measuring the execution time of an action; -- ``timeout``: timeout (in seconds) for the action execution. A value of 0 - means that the action does not have a customized timeout constraint, though - it may still have to honor the system-wide ``default_action_timeout`` - setting. -- ``status``: a string representation of the current status of the action. See - subsection below for detailed status definitions. -- ``status_reason``: a string describing the reason that has led the action to - its current status. -- ``control``: a string for holding the pending signals such as ``CANCEL``, - ``SUSPEND`` or ``RESUME``. -- ``inputs``: a dictionary that provides inputs to the action when executed; -- ``outputs``: a dictionary that captures the outputs (including error - messages) from the action execution; -- ``depends_on``: a UUID list for the actions that must be successfully - completed before the current action becomes ``READY``. An action cannot - become ``READY`` when this list is not empty. -- ``depended_by``: a UUID list for the actions that depend on the successful - completion of the current action. When the current action completes - successfully, the actions listed in this property will get notified. -- ``created_at``: the timestamp when the action was created; -- ``updated_at``: the timestamp when the action was last updated; - -*TODO*: Add support for scheduled action execution. - -*NOTE*: The default value of the ``default_action_timeout`` is 3600 seconds. - - -The Action Data Property ------------------------- - -An action object has a property named ``data`` which is used for saving policy -decisions. This property is a Python dict for different policies to save and -exchange policy decision data. - -Suppose we have a scaling policy, a deletion policy and a load-balancing -policy attached to the same cluster.
By design, when a ``CLUSTER_SCALE_IN`` -action is picked up for execution, the following sequence will happen: - -1) When the action is about to be executed, the worker thread checks all - policies that have registered a "pre_op" on this action type. -2) Based on the built-in priority setting, the "pre_op" of the scaling policy - is invoked, and the policy determines the number of nodes to be deleted. - This decision is saved to the action's ``data`` property in the following - format: - -:: - - "deletion": { - "count": 2 - } - -3) Based on the built-in priority setting, the deletion policy is evaluated - next. When the "pre_op" method of the deletion policy is invoked, it first - checks the ``data`` property of the action where it finds out the number of - nodes to delete. Then it will calculate the list of candidates to be - deleted using its selection criteria (e.g. ``OLDEST_FIRST``). Finally, it - saves the list of candidate nodes to be deleted to the ``data`` property of - the action, in the following format: - -:: - - "deletion": { - "count": 2, - "candidates": ["1234-4567-9900", "3232-5656-1111"] - } - -4) According to the built-in priority setting, the load-balancing policy is - evaluated last. When invoked, its "pre_op" method checks the ``data`` - property of the action and finds out the candidate nodes to be removed from - the cluster. With this information, the method removes the nodes from the - load-balancer maintained by the policy. - -5) The action's ``execute()`` method is now invoked and it removes the nodes - as given in its ``data`` property, updates the cluster's last update - timestamp, then returns. - -From the example above, we can see that the ``data`` property of an action -plays a critical role in policy checking and enforcement. To avoid losing -the in-memory ``data`` content during a service restart, Senlin persists the -content to the database whenever it is changed. - -Note that there are policies that will write to the ``data`` property of a -node for a similar reason. For example, a placement policy may decide where a -new node should be created. This information is saved into the ``data`` -property of a node. When a profile is about to create a node, it is supposed -to check this property and enforce it. For a Nova server profile, this means -that the profile code will inject ``scheduler_hints`` into the server instance -before it is created. - - -Action Statuses -~~~~~~~~~~~~~~~ - -An action can be in one of the following statuses during its lifetime: - -- ``INIT``: Action object is being initialized, not ready for execution; -- ``READY``: Action object can be picked up by any worker thread for - execution; -- ``WAITING``: Action object has dependencies on other actions; it may - become ``READY`` only when all the actions it depends on have completed - successfully; -- ``WAITING_LIFECYCLE_COMPLETION``: Action object is a node deletion that is - awaiting lifecycle completion. It will become ``READY`` when the complete - lifecycle API is called or the lifecycle hook timeout in the deletion policy is - reached. -- ``RUNNING``: Action object is being executed by a worker thread; -- ``SUSPENDED``: Action object is suspended during execution, so the only way - to put it back to ``RUNNING`` status is to send it a ``RESUME`` signal; -- ``SUCCEEDED``: Action object has completed execution with a success; -- ``FAILED``: Action object execution has been aborted due to failures; -- ``CANCELLED``: Action object execution has been aborted due to a ``CANCEL`` - signal.
- -Collectively, the ``SUCCEEDED``, ``FAILED`` and ``CANCELLED`` statuses are all -valid action completion statuses. - - -The ``execute()`` Method and Return Values -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Each subclass of the base ``Action`` must provide an implementation of the -``execute()`` method which provides the actual logic to be invoked by the -generic action execution framework. - -Senlin defines a protocol for the execution of actions. The ``execute()`` -method should always return a tuple ``<result>, <reason>`` where ``<result>`` -indicates whether the action procedure execution was successful and -``<reason>`` provides an explanation of the result, e.g. the error message -when the execution has failed. In this protocol, the action procedure can -return one of the following values: - -- ``OK``: the action execution was a complete success; -- ``ERROR``: the action execution has failed with error messages; -- ``RETRY``: the action execution has encountered a resource contention - situation, so the recommendation is to re-start the action if possible; -- ``CANCEL``: the action has received a ``CANCEL`` signal and thus has aborted - its execution; -- ``TIMEOUT``: the action has detected a timeout error when performing some - time-consuming jobs. - -When the return value is ``OK``, the action status will be set to -``SUCCEEDED``; when the return value is ``ERROR`` or ``TIMEOUT``, the action -status will be set to ``FAILED``; when the return value is ``CANCEL``, the -action status will be set to ``CANCELLED``; finally, when the return value is -``RETRY``, the action status is reset to ``READY``, and the current worker -thread will release its lock on the action so that other threads can pick it -up when resources permit. - - -Creating An Action -~~~~~~~~~~~~~~~~~~ - -Currently, Senlin actions are mostly generated from within the Senlin engine, -either due to an RPC request, or due to another action's execution. - -In the future, Senlin plans to support user-defined actions (UDAs). The Senlin API will -provide calls for creating a UDA and for invoking an action which can be a UDA. - - -Listing Actions -~~~~~~~~~~~~~~~ - -Senlin provides an ``action_list`` API for users to query the action objects -in the Senlin database. Such a query request can be accompanied by the -following query parameters in the query string: - -- ``filters``: a map that will be used for filtering out records that fail to - match the criteria. The recognizable keys in the map include: - - * ``name``: the name of the actions where the value can be a string or a - list of strings; - * ``target``: the UUID of the object targeted by the action where the value - can be a string or a list of strings; - * ``action``: the builtin action for matching where the value can be a - string or a list of strings; - -- ``limit``: a number that restricts the maximum number of action records to be - returned from the query. It is useful for displaying the records in pages - where the page size can be specified as the limit. -- ``marker``: A string that represents the last seen UUID of actions in - previous queries. This query will only return results appearing after the - specified UUID. This is useful for displaying records in pages. -- ``sort``: A string to enforce sorting of the results. It accepts a list of - known property names of an action as sorting keys separated by commas. Each - sorting key can optionally have either ``:asc`` or ``:desc`` appended to the - key for controlling the sorting direction.
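As a minimal illustration of combining these parameters, a paged query might look like the following. The endpoint path, parameter spelling and values here are only an example, not a normative API reference:

::

    GET /v1/actions?name=node_create&limit=20&marker=5d9f8a3e-...&sort=created_at:desc

Such a request would return at most 20 ``node_create`` actions appearing after the record identified by the marker, sorted by creation time in descending order.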
- - -Getting An Action -~~~~~~~~~~~~~~~~~ - -The Senlin API provides the ``action_show`` API call for software or a user to -retrieve a specific action for examining its details. When such a query -arrives at the Senlin engine, the engine will search the database for the -``action_id`` specified. - -A user can provide the UUID, the name or the short ID of an action as the -``action_id`` for query. The Senlin engine will try each of them in sequence. -When more than one action matches the criteria, an error message is returned -to the user; otherwise the details of the action object are returned. - - -Signaling An Action -~~~~~~~~~~~~~~~~~~~ - -When an action is in ``RUNNING`` status, a user can send signals to it. A -signal is actually a word that will be written into the ``control`` field of -the ``action`` table in the database. - -When an action is capable of handling signals, it is supposed to check its -``control`` field in the DB table regularly and abort execution in a graceful -way. An action has the freedom to check or ignore these signals. In other -words, Senlin cannot guarantee that a signal will have an effect on any action. - -The currently supported signal words are: - -- ``CANCEL``: this word indicates that the target action should cancel its - execution and return when possible; -- ``SUSPEND``: this word indicates that the target action should suspend its - execution when possible. The action doesn't have to return. As an - alternative, it can sleep waiting on a ``RESUME`` signal to continue its - work; -- ``RESUME``: this word indicates that the target action, if suspended, should - resume its execution. - -Support for the ``SUSPEND`` and ``RESUME`` signals is still under development. diff --git a/doc/source/contributor/api_microversion.rst b/doc/source/contributor/api_microversion.rst deleted file mode 100644 index 59262a342..000000000 --- a/doc/source/contributor/api_microversion.rst +++ /dev/null @@ -1,374 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -=================== -API Microversioning -=================== - -Background -~~~~~~~~~~ - -*API Microversioning* is a framework in Senlin to enable smooth evolution -of the Senlin REST API while preserving its backward compatibility. The basic -idea is that a user has to explicitly specify the particular version of the API -requested in the request. Disruptive changes to the API can then be added -without breaking existing users who don't specifically ask for them. This is -done with an HTTP header ``OpenStack-API-Version`` as suggested by the -OpenStack API Working Group. The value of the header should contain the -service name (``clustering``) and the desired API version which is a -monotonically increasing semantic version number starting from ``1.0``. - -If a user makes a request without specifying a version, they will get the -``DEFAULT_API_VERSION`` as defined in ``senlin.api.common.wsgi``. This value -is currently ``1.0`` and is expected to remain so for quite a long time.
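For example, a client that wants to pin its requests to version 1.2 of the clustering API would include a header like the following (the version number here is purely illustrative):

::

    OpenStack-API-Version: clustering 1.2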
- -There is a special value "``latest``" which, when specified, allows -a client to always invoke the most recent version of the API supported by the server. - -.. warning:: The ``latest`` value is mostly meant for integration testing and - would be dangerous to rely on in client code since Senlin microversions do - not follow semver, so backward compatibility is not guaranteed. - Clients like python-senlinclient, openstacksdk or python-openstackclient - should always request a specific microversion, limiting what is acceptable - to the version range they understand at the time. - - -When to Bump the Microversion -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A microversion is needed when the contract to the user is changed. The user -contract covers many kinds of information such as: - -- the Request - - - the list of resource URLs which exist on the server - - Example: adding a new ``GET clusters/{ID}/foo`` resource which didn't exist - in a previous version of the code - - - the list of query parameters that are valid on URLs - - Example: adding a new parameter ``is_healthy`` when querying a node by - ``GET nodes/{ID}?is_healthy=True`` - - - the list of query parameter values for non-freeform fields - - Example: parameter ``filters`` takes a small set of properties "``A``", - "``B``", "``C``"; now support for a new property "``D``" is added - - - new headers accepted on a request - - - the list of attributes and data structures accepted - - Example: adding a new attribute ``'locked': True/False`` to a request body - -- the Response - - - the list of attributes and data structures returned - - Example: adding a new attribute ``'locked': True/False`` to the output - of ``GET clusters/{ID}`` - - - the allowed values of non-freeform fields - - Example: adding a new allowed "``status``" field to ``GET clusters/{ID}`` - - - the list of status codes allowed for a particular request - - Example: an API previously could return 200, 400, 403, 404 and the - change would make the API now also be allowed to return 409. - - - changing a status code on a particular response - - Example: changing the return code of an API from 501 to 400. - - .. note:: According to the OpenStack API Working Group, a - **500 Internal Server Error** should **NOT** be returned to the user for - failures due to user error that can be fixed by changing the request on - the client side. This kind of a fix doesn't require a change to the - microversion. - - - new headers returned on a response - -The following flow chart attempts to walk through the process of "do -we need a microversion?". - - -.. graphviz:: - - digraph states { - - label="Do I need a microversion?"
- - silent_fail[shape="diamond", style="", group=g1, label="Did we silently - fail to do what is asked?"]; - ret_500[shape="diamond", style="", group=g1, label="Did we return a 500 - before?"]; - new_error[shape="diamond", style="", group=g1, label="Are we changing the - status code returned?"]; - new_attr[shape="diamond", style="", group=g1, label="Did we add or remove - an attribute to a resource?"]; - new_param[shape="diamond", style="", group=g1, label="Did we add or remove - an accepted query string parameter or value?"]; - new_resource[shape="diamond", style="", group=g1, label="Did we add or - remove a resource url?"]; - - - no[shape="box", style=rounded, label="No microversion needed"]; - yes[shape="box", style=rounded, label="Yes, you need a microversion"]; - no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; - - silent_fail -> ret_500[label=" no"]; - silent_fail -> no2[label="yes"]; - - ret_500 -> no2[label="yes [1]"]; - ret_500 -> new_error[label=" no"]; - - new_error -> new_attr[label=" no"]; - new_error -> yes[label="yes"]; - - new_attr -> new_param[label=" no"]; - new_attr -> yes[label="yes"]; - - new_param -> new_resource[label=" no"]; - new_param -> yes[label="yes"]; - - new_resource -> no[label=" no"]; - new_resource -> yes[label="yes"]; - - {rank=same; yes new_attr} - {rank=same; no2 ret_500} - {rank=min; silent_fail} - } - - -.. NOTE:: The reason behind such a strict contract is that we want application - developers to be sure what the contract is at every microversion in Senlin. - - When in doubt, consider application authors. If it would work with no client - side changes on both Senlin versions, you probably don't need a microversion. - If, however, there is any ambiguity, a microversion is likely needed. - - -When a Microversion Is Not Needed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A microversion is not needed in the following situations: - -- the response - - - Changing the error message without changing the response code does not - require a new microversion. - - - Removing an inapplicable HTTP header, for example, suppose the Retry-After - HTTP header is being returned with a 4xx code. This header should only be - returned with a 503 or 3xx response, so it may be removed without bumping - the microversion. - - -Working with Microversions -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the ``senlin.api.common.wsgi`` module, we define an ``@api_version`` -decorator which is intended to be used on top-level methods of controllers. -It is not appropriate for lower-level methods. - - -Adding a New API Method ------------------------ - -In the controller class: - -.. code-block:: python - - @wsgi.Controller.api_version("2.4") - def my_api_method(self, req, id): - .... - -This method is only available if the caller had specified a request header -``OpenStack-API-Version`` with value ``clustering <version>`` and ``<version>`` is >= -``2.4``. If they had specified a lower version (or omitted it, thus getting the -default of ``1.0``) the server would respond with HTTP 404. - - -Removing an API Method ----------------------- - -In the controller class: - -.. code-block:: python - - @wsgi.Controller.api_version("2.1", "2.4") - def my_api_method(self, req, id): - .... - -This method would only be available if the caller had specified an -``OpenStack-API-Version`` with value ``clustering <version>`` and the ``<version>`` is -<= ``2.4``. If ``2.5`` or later is specified the server will respond with -HTTP 404. - - -Changing a Method's Behavior ----------------------------- - -In the controller class: - -.. 
code-block:: python - - @wsgi.Controller.api_version("1.0", "2.3") - def my_api_method(self, req, id): - .... method_1 ... - - @wsgi.Controller.api_version("2.4") # noqa - def my_api_method(self, req, id): - .... method_2 ... - -If a caller specified ``2.1``, ``2.2`` or ``2.3`` (or received the default of -``1.0``) they would see the result from ``method_1``; with ``2.4`` or later, -the result from ``method_2``. - -It is vital that the two methods have the same name, so the second one will -need ``# noqa`` to avoid failing flake8's ``F811`` rule. The two methods may -be different in any kind of semantics (schema validation, return values, -response codes, etc.) - - -When Not Using Decorators -------------------------- - -When you don't want to use the ``@api_version`` decorator on a method or you -want to change behavior within a method (say, because it leads to simpler or simply a -lot less code) you can directly test for the requested version with a method -as long as you have access to the API request object. Every API method has a -``version_request`` object attached to the ``Request`` object and that can be -used to modify behavior based on its value: - -.. code-block:: python - - import senlin.api.common.version_request as vr - - def index(self, req): - # common code ... - - req_version = req.version_request - req1_min = vr.APIVersionRequest("2.1") - req1_max = vr.APIVersionRequest("2.5") - req2_min = vr.APIVersionRequest("2.6") - req2_max = vr.APIVersionRequest("2.10") - - if req_version.matches(req1_min, req1_max): - # stuff... - elif req_version.matches(req2_min, req2_max): - # other stuff... - elif req_version > vr.APIVersionRequest("2.10"): - # more stuff... - - # common code ... - -The first argument to the matches method is the minimum acceptable version -and the second is the maximum acceptable version. A specified version can be null: - -.. code-block:: python - - null_version = APIVersionRequest() - -If the minimum version specified is null then there is no restriction on -the minimum version, and likewise if the maximum version is null there -is no restriction on the maximum version. Alternatively, a one-sided comparison -can be used as in the example above. - - -Planning and Committing Changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once the idea of an API change is discussed with the core team and the -consensus has been reached to bump the micro-version of Senlin API, you can -start working on the changes in the following order: - -1. Prepare the engine and possibly the action layer for the change. One STRICT - requirement is that the newly proposed change(s) should not break any - existing users. - -2. Add a new versioned object if a new API is introduced; or modify the fields - of an existing object representing the API request. You are expected to - override the ``obj_make_compatible()`` method to ensure the request formed - will work on an older version of engine. - -3. If the change is about modifying an existing API, you will need to bump the - version of the request object. You are also required to add or change the - ``VERSION_MAP`` dictionary of the request object class where the key is the - API microversion and the value is the object version. For example: - -.. code-block:: python - - @base.SenlinObjectRegistry.register - class ClusterDanceRequest(base.SenlinObject): - - # VERSION 1.0: Initial version - # VERSION 1.1: Add field 'style' - VERSION = '1.1' - VERSION_MAP = { - 'x.y': '1.1' - } - - fields = { - ... 
- 'style': fields.StringField(nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - # add the logic to convert the request for a target version - ... - - -4. Patch the API layer to introduce the change. This involves changing the - ``senlin/api/openstack/history.rst`` file to include the descriptive - information about the changes made. - -5. Revise the API reference documentation so that the changes are properly - documented. - -6. Add a release note entry for the API change. - -7. Add tempest-based API tests and functional tests. - -8. Update ``_MAX_API_VERSION`` in ``senlin.api.openstack.versions``, if needed. - Note that each time we bump the API microversion, we may introduce two or - more changes rather than one single change; in that case, the update of - ``_MAX_API_VERSION`` needs to be done only once. - -9. Commit patches to the ``openstacksdk`` project so that new API - changes are accessible from the client side. - -10. Wait for the new release of the ``openstacksdk`` project that includes - the new changes and then propose changes to the ``python-senlinclient`` - project. - - -Allocating a Microversion -~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you are adding a patch which adds a new microversion, it is necessary to -allocate the next microversion number. Except under extremely unusual -circumstances, the minor number of ``_MAX_API_VERSION`` will be incremented. -This will also be the new microversion number for the API change. - -It is possible that multiple microversion patches would be proposed in -parallel and the microversions would conflict between patches. This will -cause a merge conflict. We don't reserve a microversion for each patch in -advance as we don't know the final merge order. Developers may need to -rebase their patches over time, recalculating the version number as above based on the -updated value of ``_MAX_API_VERSION``. - - -.. include:: ../../../senlin/api/openstack/history.rst diff --git a/doc/source/contributor/authorization.rst b/doc/source/contributor/authorization.rst deleted file mode 100644 index 3b1d93043..000000000 --- a/doc/source/contributor/authorization.rst +++ /dev/null @@ -1,191 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================== -Senlin Authorization -==================== - -As a service to be consumed by end users and possibly other IT persons, Senlin -has some basic components and strategies to manage access control. The design -is meant to be as open as possible, though the current focus, as this document is -drafted, is on enabling Keystone-based (aka. token-based) OpenStack -authorization. - -This document presents an overview of the authentication and authorization -mechanisms provided by the Senlin API and its service engine. The top-most -design consideration of these mechanisms is to make them accommodating so that -the interactions with different authentication engines can be done using the -same framework.
The reason behind this decision is to make Senlin cloud-backend -agnostic so it can be used to support clustering of resources in multi-cloud, -multi-region, or multi-availability-zone setups. - - -Major Components -~~~~~~~~~~~~~~~~ - -In the context of an OpenStack cloud, the most important components involved in -the authentication and the authorization process are: - -- The Senlin client (i.e. the `python-senlinclient` package) which accepts - user credentials provided through environment variables and/or the command - line arguments and forwards them to the OpenStack SDK (i.e. the - `openstacksdk` package) when making service requests to the Senlin API. -- The OpenStack SDK (`openstacksdk`) is used by the Senlin engine to - interact with any other OpenStack services. The Senlin client also uses the - SDK to talk to the Senlin API. The SDK package translates the user-provided - credentials into a token by invoking the Keystone service. -- The Keystone middleware (i.e. `keystonemiddleware`) which backs the - `auth_token` WSGI middleware in the Senlin API pipeline provides a basic - validation filter. The filter is responsible for validating the token found - in the HTTP request header; it then populates the request header - with detailed information for the downstream filters (including the API - itself) to use. -- The `context` WSGI middleware which is based on the `oslo.context` package - provides a constructor of the `RequestContext` data structure that - accompanies any requests down the WSGI application pipeline so that those - downstream components don't have to access the HTTP request header. - - -Usage Scenarios -~~~~~~~~~~~~~~~ - -There are several ways to raise a service request to the Senlin API, each of -which has its own characteristics that will affect the way authentication -and/or authorization is performed. - -1) Users interact with the Senlin service API using the OpenStack client (i.e. the - plugin provided by the `python-senlinclient` package). The requests, after - being preprocessed by the OpenStack SDK, will contain a valid Keystone token - that can be validated by the `auth_token` WSGI middleware. -2) Users interact with the Senlin service API directly by making HTTP requests - where the requester's credentials have been validated by Keystone, so the - requests will carry a valid Keystone token for verification by the - `auth_token` middleware as well. -3) Users interact with the Senlin service API directly by making HTTP requests, but - the requests are "naked" ones, which means that the requests do not contain - credentials as expected by the Senlin API (or other OpenStack services). - Instead, the requested URI contains some special parameters for authentication - and/or authorization purposes. - -Scenarios 1) and 2) are the most common ways for users to use the Senlin API. They -share the same request format when the request arrives at the Senlin API -endpoint. Scenario 3) is a little bit different. What Senlin wants to achieve -is making no assumptions about where the service requests come from. That means it -cannot assume that the requester (which could be any program) will fill in the -required headers in their service requests. One example of such use cases is -the Webhook API Senlin provides that enables a user to trigger an action on an -object managed by Senlin. Senlin provides special support for these use cases, -as sketched below.
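As a rough sketch of scenario 2), a raw HTTP request would carry a pre-validated Keystone token in the standard ``X-Auth-Token`` header for the ``auth_token`` middleware to verify. The host name, resource path and token below are hypothetical:

::

    GET /v1/clusters HTTP/1.1
    Host: senlin-api.example.com
    X-Auth-Token: <token-obtained-from-keystone>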
- - -Operation Delegation (Trusts) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Since Senlin models most operations as "Actions" that can be executed by -worker threads asynchronously, these operations have to be done on behalf of -the requester so that they can be properly traced, authenticated, audited or -logged. - - -Credentials and Context ------------------------ - -A generic solution to the delegation problem is to ask users to provide their -credentials to Senlin so Senlin can impersonate them when interacting with -other services. In fact, this may be the only solution that can be applied on -different cloud backends. - -Senlin supports a `context` property for all "profile" types by default unless -overridden by a profile type implementation. This context can be treated as a -container for these credentials. Storing user credentials in the Senlin database -does imply a security risk. In the future, we hope Senlin can make use of the -Barbican service for this purpose. - -Senlin's implementation of context is based on the `oslo_context` package. -There is still room for improvement thanks to the new enhancements to that -package. - - -Trusts: Dealing with Token Expiration -------------------------------------- - -In some cases, the solution above may be impractical because after the -client-side processing and/or the front-end middleware filtering, Senlin -cannot get the original user credentials (e.g. user name and password). -Senlin can only get a "token", which expires in an hour by default. This means -that after no more than one hour, Senlin won't be able to use this token for -authentication/authorization. - -The OpenStack identity service (a.k.a Keystone) has considered this situation -and provided a solution. When a requester wants to delegate his/her roles in a -project to a 3rd party, he or she can create a "Trust" relationship between -him/her (the trustor) and that 3rd party (the trustee). The "Trust" has a -unique ID that can be used by the trustee when authenticating with Keystone. -Once the trust ID is authenticated, the trustee can perform operations on behalf -of the trustor. - -The trust extension in Keystone V3 can be used to solve the token expiration -problem. There are two ways to do this as shown below. - -1) Requester Created Trusts: Before creating a profile, a requester can create - a trust with the trustee set to the `senlin` user. He or she can customize - the roles that can be assumed by `senlin`, which can be a subset of the - roles the requester currently has in that project. When the requester later - on creates a profile, he or she can provide the `trust_id` as a key of the - `context` property. Senlin can later on use this trust for authentication - and authorization purposes. -2) Senlin Created Trusts: The solution above adds some burden on the end user. - In order to make the Senlin service easy to use, Senlin does the trust - creation in the background. Whenever a new request comes in, Senlin checks - if there is an existing trust relationship between the requester and - the `senlin` user. If none is found, Senlin will "hijack" the user's token and create a trust - with `senlin` as the trustee. This trust relationship is currently stored - in the Senlin database, and the management of this sensitive information can be - delegated to Barbican as well in the future. - - -Precedence Consideration ------------------------- - -Since there is now more than one place from which Senlin can obtain credentials, -it needs to impose a precedence among the credential sources.
- -When Senlin tries to contact a cloud service via a driver, the requests are -issued from a subclass of `Profile`. Senlin will check the `user` property of -the targeted cluster or node and retrieve the trust record from the database using -the `user` as the key. By default, Senlin will try to obtain a new token from -Keystone using the `senlin` user's credentials (configured in the `senlin.conf` -file) and the `trust_id`. Before doing that, Senlin will check if the profile -used has a "customized" `context`. If there are credentials such as `password` -or `trust_id` in the context, Senlin deletes its current `trust_id` from the -context, and adds the credentials found in the profile into the context. - -In this way, a user can specify the credentials Senlin should use when talking -to other cloud services by customizing the `context` property of a profile. -The specified credentials may or may not belong to the requester. - - -Trust Middleware ----------------- - -When a service request arrives at the Senlin API, Senlin checks if there is a -trust relationship built between the requesting user and the `senlin` user. A -new trust is created if no such record is found. - -Once a trust is found or created, the `trust_id` is saved into the current -`context` data structure. Down the invocation path, or during asynchronous -action executions, the `trust_id` will be used for token generation when -needed. - -Senlin provides an internal database table to store the trust information. It -may be removed in the future when there are better ways to handle this sensitive -information. diff --git a/doc/source/contributor/cluster.rst b/doc/source/contributor/cluster.rst deleted file mode 100644 index 6cae77fc9..000000000 --- a/doc/source/contributor/cluster.rst +++ /dev/null @@ -1,624 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======== -Clusters -======== - -Clusters are first-class citizens in Senlin service design. A cluster is -defined as a collection of homogeneous objects. The "homogeneous" here means -that the objects managed (aka. Nodes) have to be instantiated from the same -"profile type". - -A cluster can contain zero or more nodes. Senlin provides REST APIs for users -to create, retrieve, update, and delete clusters. Using these APIs, a user can -manage the node membership of a cluster. - -A cluster is owned by a user (the owner), and it is accessible from within the -Keystone project (tenant) which is the default project of the user. - -A cluster has the following timestamps when instantiated: - -- ``init_at``: the timestamp when a cluster object is initialized in the - Senlin database, but the actual cluster creation has not yet started; -- ``created_at``: the timestamp when the cluster object is created, i.e. - the ``CLUSTER_CREATE`` action has completed; -- ``updated_at``: the timestamp when the cluster was last updated.
- - -Cluster Statuses -~~~~~~~~~~~~~~~~ - -A cluster can have one of the following statuses during its lifecycle: - -- ``INIT``: the cluster object has been initialized, but not created yet; -- ``ACTIVE``: the cluster is created and providing service; -- ``CREATING``: the cluster creation action is still on going; -- ``ERROR``: the cluster is still providing services, but there are things - going wrong that need human intervention; -- ``CRITICAL``: the cluster is not operational, it may or may not be - providing services as expected. Senlin cannot recover it from its current - status. The best way to deal with this cluster is to delete it and then - re-create it if needed. -- ``DELETING``: the cluster deletion is ongoing; -- ``WARNING``: the cluster is operational, but there are some warnings - detected during past operations. In this case, human involvement is - suggested but not required. -- ``UPDATING``: the cluster is being updated. - -Along with the ``status`` property, Senlin provides a ``status_reason`` -property for users to check the cause of the cluster's current status. - -To avoid frequent database accesses, a cluster object has a runtime data -property named ``rt`` which is a Python dictionary. The property caches the -profile referenced by the cluster, the list of nodes in the cluster and the -policies attached to the cluster. The runtime data is not directly visible to -users. It is merely a convenience for cluster operations. - - -Creating A Cluster -~~~~~~~~~~~~~~~~~~ - -When creating a cluster, the Senlin API will verify whether the request -carries a body with valid, sufficient information for the engine to complete -the creation job. The following fields are required in a map named ``cluster`` -in the request JSON body: - -- ``name``: the name of the cluster to be created; -- ``profile``: the name or ID or short-ID of a profile to be used; -- ``desired_capacity``: the desired number of nodes in the cluster, which is - also treated as the initial number of nodes to be created. - -The following optional fields can be provided in the ``cluster`` map in the -JSON request body: - -- ``min_size``: the minimum number of nodes inside the cluster, default - value is 0; -- ``max_size``: the maximum number of nodes inside the cluster, default - value is -1, which means there is no upper limit on the number of nodes; -- ``timeout``: the maximum number of seconds to wait for the cluster to - become ready, i.e. ``ACTIVE``. -- ``metadata``: a dict of key-value pairs to be associated with the cluster. -- ``dependents``: A dict containing dependency information between a nova server/ - heat stack cluster and a container cluster. The container node's id will be - stored in the 'dependents' property of its host cluster. - -The ``max_size`` and the ``min_size`` fields, when specified, will be checked -against each other by the Senlin API. The API also checks if the specified -``desired_capacity`` falls outside the range [``min_size``, ``max_size``]. If -any verification fails, an ``HTTPBadRequest`` exception is thrown and the -cluster creation request is rejected. - -A cluster creation request is then forwarded to the Senlin RPC engine for -processing, where the engine creates an Action for the request and queues it -for any worker threads to execute. Once the action is queued, the RPC engine -returns the current cluster properties in a map to the API. Along with these -properties, the engine also returns the UUID of the Action that will do the -real job of cluster creation.
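For illustration only, a creation request body assembling the fields described above might look like the following; the field names follow the lists above, while all values are hypothetical:

::

    {
        "cluster": {
            "name": "web_cluster",
            "profile": "web_server_profile",
            "desired_capacity": 3,
            "min_size": 1,
            "max_size": 5,
            "timeout": 3600,
            "metadata": {"owner_team": "web"}
        }
    }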
A user can check the status of the action to determine whether the cluster
creation has completed successfully or failed.


Listing Clusters
~~~~~~~~~~~~~~~~

Clusters in the current project can be queried using some query parameters.
None of these parameters is required. By default, the Senlin API will return
all clusters that are not deleted.

When listing clusters, the following query parameters can be specified,
individually or combined:

- ``filters``: a map containing key-value pairs for matching. Records that
  fail to match the criteria will be filtered out. The valid keys in this map
  include:

  * ``name``: name of clusters to list; can be a string or a list of strings;
  * ``status``: status of clusters; can be a string or a list of strings;

- ``limit``: a number that restricts the maximum number of records to be
  returned from the query. It is useful for displaying the records in pages
  where the page size can be specified as the limit.
- ``marker``: a string that represents the last seen UUID of clusters in
  previous queries. The query will only return results appearing after the
  specified UUID. This is useful for displaying records in pages.
- ``sort``: a string to enforce sorting of the results. It accepts a list of
  known property names of a cluster as sorting keys separated by commas. Each
  sorting key can optionally have either ``:asc`` or ``:desc`` appended to the
  key for controlling the sorting direction.
- ``global_project``: a boolean indicating whether cluster listing should be
  done in a tenant-safe way. When this value is specified as False (the
  default), only clusters from the current project that match the other
  criteria will be returned. When this value is specified as True, clusters
  matching all other criteria will be returned, regardless of the project in
  which a cluster was created. Only a user with admin privilege is permitted
  to do a global listing.


Getting a Cluster
~~~~~~~~~~~~~~~~~

When a user wants to check the details of a specific cluster, he or she can
specify one of the following keys for the query:

- cluster UUID: clusters are queried strictly based on the UUID given. This is
  the most precise query supported.
- cluster name: Senlin allows multiple clusters to have the same name. It is
  the user's responsibility to avoid name conflicts if needed. The output is
  the details of a cluster if the cluster name is unique; otherwise Senlin
  returns a message telling the user that multiple clusters were found
  matching the specified name.
- short ID: considering that a UUID is a long string that is not so convenient
  to input, Senlin supports a short version of UUIDs for queries. The Senlin
  engine will use the provided string as a prefix to attempt a match in the
  database. When the "ID" is long enough to be unique, the details of the
  matching cluster are returned; otherwise Senlin returns an error message
  indicating that more than one cluster matched the short ID.

The Senlin engine service will try the above three ways, in order, to find a
match in the database.

In the returned result, Senlin injects a list of node IDs for the nodes in the
cluster. It also injects the name of the profile used by the cluster. These
are all for the user's convenience.
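
As a rough illustration of the three lookup forms, the sketch below assumes an
``openstacksdk`` connection object; the ``clustering`` proxy method names are
indicative of the SDK's interface, but treat the exact calls as assumptions
rather than an authoritative reference:

.. code-block:: python

    import openstack

    # Credentials are read from clouds.yaml; 'devstack' is an example name.
    conn = openstack.connect(cloud='devstack')

    # 1) exact UUID match -- the most precise query
    cluster = conn.clustering.get_cluster('5b8f40f6-9b2f-4a5e-8b3a-0d1c2e3f4a5b')

    # 2) by name -- fails if multiple clusters share the name
    cluster = conn.clustering.find_cluster('web-cluster')

    # 3) by short ID (UUID prefix) -- fails if the prefix is ambiguous
    cluster = conn.clustering.find_cluster('5b8f40f6')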


Updating A Cluster
~~~~~~~~~~~~~~~~~~

A cluster can be updated upon a user's request. In theory, all properties of a
cluster could be updated/changed. However, some update operations are
light-weight while others are heavy-weight, because the semantics of the
properties differ a lot from each other. Currently, cluster profile related
changes and cluster size related changes are heavy-weight because they may
induce a chain of operations on the cluster. Updating other properties is a
light-weight operation.

In the JSON body of a ``cluster_update`` request, users can specify new values
for the following properties:

- ``name``: new cluster name;
- ``profile_id``: ID, name or short ID of a profile object to use;
- ``metadata``: a list of key-value pairs to be associated with the cluster;
  this dict will be merged with the existing key-value pairs based on keys;
- ``desired_capacity``: new *desired* size for the cluster;
- ``min_size``: new lower bound for the cluster size;
- ``max_size``: new upper bound for the cluster size;
- ``timeout``: new timeout value for the specified cluster;
- ``profile_only``: a boolean value indicating whether only the cluster's
  profile reference should be updated, leaving existing nodes unchanged.


Update Cluster's Profile
------------------------

When ``profile_id`` is specified, the request is interpreted as a holistic
update to all nodes across the cluster. The targeted use case is a
cluster-wide system upgrade, for example replacing the glance images used by
the cluster nodes when new kernel patches have been applied or software
defects have been fixed.

When receiving such an update request, the Senlin engine checks whether the
new profile referenced exists and whether it has the same profile type as the
existing profile. An exception is raised if either verification fails, and
the request is rejected.

After the engine has validated the request, an Action of type
``CLUSTER_UPDATE`` is created and queued internally for execution. Later on,
when a worker thread picks up the action for execution, it first locks the
whole cluster and marks the cluster status as ``UPDATING``. It then forks one
``NODE_UPDATE`` action per node inside the cluster, which are in turn queued
for execution. Other worker threads pick up the node-level update actions for
execution and mark them as completed or failed. When all these node-level
updates are completed, the ``CLUSTER_UPDATE`` operation continues and marks
the cluster as ``ACTIVE`` again.

Senlin also provides a parameter ``profile_only`` for this action, so that any
newly created nodes will use the new profile, while existing nodes are left
unchanged.

The cluster update operation may take a long time to complete, depending on
the response time of the underlying profile operations. Note also that when an
update policy is attached to the cluster and enabled, the update operation may
be split into several batches so that 1) a minimum number of nodes remains in
service at any time; and 2) the pressure on the underlying service is
controlled.


Update Cluster Size Properties
------------------------------

When any one of the ``desired_capacity``, ``min_size`` and ``max_size``
properties is specified in the ``CLUSTER_UPDATE`` request, it may lead to a
resize operation on the cluster.

The Senlin API does a preliminary validation of the new property values. For
example, if both ``min_size`` and ``max_size`` are specified, they have to be
integers and the value of ``max_size`` has to be greater than the value of
``min_size``, unless the value of ``max_size`` is -1, which means the upper
bound of the cluster size is unlimited.
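
For instance, a size-only update request consistent with the rules above might
look like this sketch (the ``cluster`` wrapper follows the create request
format; the values are illustrative):

::

    {
      "cluster": {
        "min_size": 2,
        "desired_capacity": 4,
        "max_size": 10
      }
    }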

When the request is received by the Senlin engine, the engine first retrieves
the cluster properties from the database and does further cross-verification
between the new property values and the current values. For example, it is
treated as an invalid request if a user has specified a value for ``min_size``
but no value for ``max_size``, and the new ``min_size`` is greater than the
existing ``max_size`` of the cluster. In this case, the user has to provide a
valid ``max_size`` to override the existing value, or lower the ``min_size``
value so that the request becomes acceptable.

Once the cross-verification has passed, the Senlin engine calculates the new
``desired_capacity`` and adjusts the size of the cluster if deemed necessary.
For example, when the cluster size is below the new ``min_size``, new nodes
are created and added to the cluster; when the cluster size is above the new
``max_size``, some nodes are removed from the cluster. If ``desired_capacity``
is set and the property value falls within the new range of the cluster size,
Senlin tries to resize the cluster to the ``desired_capacity``.

When the size of the cluster is adjusted, the Senlin engine checks whether
there are relevant policies attached to the cluster, so that the engine will
add and/or remove nodes in a predictable way.


Update Other Cluster Properties
-------------------------------

The update of other cluster properties is relatively straightforward. The
Senlin engine simply verifies the data types when necessary and overrides the
existing property values in the database.

Note that in cases where multiple properties are specified in a single
``CLUSTER_UPDATE`` request, some will take longer to complete than others. Any
mix of update properties is acceptable to the Senlin API and the engine.


Cluster Actions
~~~~~~~~~~~~~~~

A cluster object supports the following asynchronous actions:

- ``add_nodes``: add a list of nodes into the target cluster;
- ``del_nodes``: remove the specified list of nodes from the cluster;
- ``replace_nodes``: replace the specified list of nodes in the cluster;
- ``resize``: adjust the size of the cluster;
- ``scale_in``: explicitly shrink the size of the cluster;
- ``scale_out``: explicitly enlarge the size of the cluster;
- ``policy_attach``: attach a policy object to the cluster;
- ``policy_detach``: detach a policy object from the cluster;
- ``policy_update``: modify the settings of a policy that is attached to the
  cluster.

The ``scale_in`` and ``scale_out`` actions are subject to change in the
future. We recommend using the unified ``CLUSTER_RESIZE`` action for cluster
size adjustments.

Software or a user can trigger a ``cluster_action`` API to issue an action for
Senlin to perform. In the JSON body of these requests, Senlin verifies that
the top-level key contains *one* of the above actions. When no valid action
name is found, or more than one action is specified, the API returns an error
message to the caller and rejects the request.


Adding Nodes to a Cluster
-------------------------

The Senlin API provides the ``add_nodes`` action for users to add existing
nodes to the specified cluster. The parameter for this action is interpreted
as a list in which each item is the UUID, name or short ID of a node.

When receiving an ``add_nodes`` action request, the Senlin API only validates
that the parameter is a non-empty list. After this validation, the request is
forwarded to the Senlin engine for processing.
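
The request body for this action is small. Assuming the action name is the
top-level key as described above and the node list is carried under a
``nodes`` key, a sketch with made-up identifiers could be:

::

    {
      "add_nodes": {
        "nodes": ["6e25a3e1", "node-ha-02"]
      }
    }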
The Senlin engine examines the nodes in the list one by one, and rejects the
request if any of the following conditions holds:

- a node from the list is not in ``ACTIVE`` status;
- a node from the list is still a member of another cluster;
- a node from the list cannot be found in the database;
- the number of nodes to add is zero.

When this phase of validation succeeds, the request is translated into a
``CLUSTER_ADD_NODES`` built-in action and queued for execution. The engine
returns the action's UUID to the user for status checking.

When the action is picked up by a worker thread for execution, Senlin checks
whether the profile type of the nodes to be added matches that of the cluster.
Finally, a number of ``NODE_JOIN`` actions are forked from the
``CLUSTER_ADD_NODES`` action and executed. When the ``NODE_JOIN`` actions
complete, the ``CLUSTER_ADD_NODES`` action returns with success.

In cases where there are load-balancing policies attached to the cluster, the
``CLUSTER_ADD_NODES`` action saves the list of UUIDs of the new nodes into the
action's ``data`` field so that those policies can update the associated
resources.


Deleting Nodes from a Cluster
-----------------------------

The Senlin API provides the ``del_nodes`` action for users to delete existing
nodes from the specified cluster. The parameter for this action is interpreted
as a list in which each item is the UUID, name or short ID of a node.

When receiving a ``del_nodes`` action request, the Senlin API only validates
that the parameter is a non-empty list. After this validation, the request is
forwarded to the Senlin engine for processing.

The Senlin engine examines the nodes in the list one by one, and rejects the
request if any of the following conditions holds:

- a node from the list cannot be found in the database;
- a node from the list is not a member of the specified cluster;
- the number of nodes to delete is zero.

When this phase of validation succeeds, the request is translated into a
``CLUSTER_DEL_NODES`` built-in action and queued for execution. The engine
returns the action's UUID to the user for status checking.

When the action is picked up by a worker thread for execution, Senlin forks a
number of ``NODE_DELETE`` actions and executes them asynchronously. When all
forked actions complete, the ``CLUSTER_DEL_NODES`` action returns with
success.

In cases where there are load-balancing policies attached to the cluster, the
``CLUSTER_DEL_NODES`` action saves the list of UUIDs of the deleted nodes into
the action's ``data`` field so that those policies can update the associated
resources.

If a deletion policy with a hooks property is attached to the cluster, the
``CLUSTER_DEL_NODES`` action creates the ``NODE_DELETE`` actions in
``WAITING_LIFECYCLE_COMPLETION`` status, without executing them. It also sends
the lifecycle hook message to the target specified in the deletion policy. If
the complete-lifecycle API is called for such a ``NODE_DELETE`` action, the
action is executed. If not all of the ``NODE_DELETE`` actions have been
executed before the hook timeout specified in the deletion policy is reached,
the remaining ``NODE_DELETE`` actions are moved into ``READY`` status and
scheduled for execution. When all actions complete, the ``CLUSTER_DEL_NODES``
action returns with success.
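
A ``del_nodes`` request body mirrors the ``add_nodes`` one; a sketch with
illustrative identifiers (again assuming a ``nodes`` key under the action
name):

::

    {
      "del_nodes": {
        "nodes": ["node-123", "aac3b8b4"]
      }
    }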

Note also that, by default, Senlin won't destroy the nodes that are deleted
from the cluster. It simply removes the nodes from the cluster so that they
become orphan nodes. Senlin also provides a parameter,
``destroy_after_deletion``, for this action so that a user can request the
deleted node(s) to be destroyed right away, instead of becoming orphan nodes.


Replacing Nodes in a Cluster
----------------------------

The Senlin API provides the ``replace_nodes`` action for users to replace
existing nodes in the specified cluster. The parameter for this action is
interpreted as a dict in which each item is a ``{OLD_NODE: NEW_NODE}`` pair.
The key ``OLD_NODE`` is the UUID, name or short ID of the node to be replaced,
and the value ``NEW_NODE`` is the UUID, name or short ID of the replacement
node.

When receiving a ``replace_nodes`` action request, the Senlin API only
validates that the parameter is a non-empty dict. After this validation, the
request is forwarded to the Senlin engine for processing.

The Senlin engine examines the node pairs one by one, and accepts the request
only if all of the following conditions are true:

- all nodes in the dict can be found in the database;
- all nodes to be replaced are members of the specified cluster;
- none of the replacement nodes is a member of any cluster;
- the profile types of all replacement nodes match that of the specified
  cluster;
- the status of every replacement node is ``ACTIVE``.

When this phase of validation succeeds, the request is translated into a
``CLUSTER_REPLACE_NODES`` built-in action and queued for execution. The engine
returns the action's UUID to the user for status checking.

When the action is picked up by a worker thread for execution, Senlin forks a
number of ``NODE_LEAVE`` and related ``NODE_JOIN`` actions and executes them
asynchronously. When all forked actions complete, the
``CLUSTER_REPLACE_NODES`` action returns with success.


Resizing a Cluster
------------------

In addition to the ``cluster_update`` request, Senlin provides a dedicated API
for adjusting the size of a cluster, i.e. ``cluster_resize``. This operation
is designed for both auto-scaling and manual-scaling use cases.

Below is a list of parameters recognized by the Senlin API when parsing the
JSON body of a ``cluster_resize`` request:

- ``adjustment_type``: the type of adjustment to be performed, whose value
  should be one of the following:

  * ``EXACT_CAPACITY``: the adjustment specifies the targeted size of the
    cluster;
  * ``CHANGE_IN_CAPACITY``: the adjustment specifies the number of nodes to be
    added to or removed from the cluster; this is the default setting;
  * ``CHANGE_IN_PERCENTAGE``: the adjustment is a relative percentage of the
    targeted cluster size.

  This field is mandatory.
- ``number``: the adjustment number, whose value is interpreted based on the
  value of ``adjustment_type``. This field is mandatory.
- ``min_size``: the new lower bound for the cluster size;
- ``max_size``: the new upper bound for the cluster size;
- ``min_step``: the minimum number of nodes to be added or removed when
  ``adjustment_type`` is set to ``CHANGE_IN_PERCENTAGE`` and the absolute
  value computed is less than 1;
- ``strict``: a boolean value indicating whether the service should reject the
  request when it cannot be fully met (``true``), or do a best-effort resizing
  operation instead (``false``).

For example, the following request asks to increase the size of the cluster by
20%, allowing Senlin to try a best-effort resize if the calculated size is
greater than the upper limit of the cluster size:

::

    {
      "adjustment_type": "CHANGE_IN_PERCENTAGE",
      "number": 20,
      "strict": false
    }

When the Senlin API receives a ``cluster_resize`` request, it first validates
the data types of the values and the sanity of the value collection. For
example, you cannot specify a ``min_size`` greater than the current upper
bound (i.e. the ``max_size`` property of the cluster) unless you also provide
a new ``max_size`` that is greater than the ``min_size``.

After the request is forwarded to the Senlin engine, the engine further
validates the parameter values against the targeted cluster. When all
validations pass, the request is converted into a ``CLUSTER_RESIZE`` action
and queued for execution. The API returns the cluster properties and the UUID
of the action at this point.

When executing the action, Senlin analyzes the request parameters and
determines the operations to be performed to meet the user's requirements. The
corresponding cluster properties are updated before the resize operation is
started.


Scaling in/out a Cluster
------------------------

As a convenience, Senlin provides the ``scale_out`` and ``scale_in`` action
APIs for clusters. With these two APIs, a user can request a cluster to be
resized by the specified number of nodes.

The ``scale_out`` and ``scale_in`` APIs both take an optional parameter named
``count``, a positive integer that specifies the number of nodes to be added
or removed. When it is omitted from the request JSON body, the Senlin engine
checks whether the cluster has any relevant policies attached that decide the
number of nodes to be added or removed. The Senlin engine uses the outputs
from these policies as the number of nodes to create (or delete) if such
policies exist. When the request does contain a ``count`` parameter and there
are policies governing the scaling arguments, the ``count`` parameter value
may be overridden or ignored.

When a ``scale_out`` or ``scale_in`` request is received by the Senlin engine,
a ``CLUSTER_SCALE_OUT`` or ``CLUSTER_SCALE_IN`` action is created and queued
for execution, after some validation of the parameter value.

A worker thread picks up the action and executes it. The worker checks whether
there are outputs from policy checking. For a ``CLUSTER_SCALE_OUT`` action,
the worker checks whether the policies checked have left a ``count`` key in
the dictionary named ``creation`` in the action's runtime ``data`` attribute,
and uses that ``count`` value for node creation. For a ``CLUSTER_SCALE_IN``
action, the worker checks whether the policies checked have left a ``count``
key in the dictionary named ``deletion`` in the action's runtime ``data``
attribute, and uses that ``count`` value for node deletion.

Note that both ``scale_out`` and ``scale_in`` actions will adjust the
``desired_capacity`` property of the target cluster.
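
As a concrete illustration, a ``scale_out`` request carrying the optional
``count`` parameter described above is as small as the following sketch:

::

    {
      "scale_out": {
        "count": 2
      }
    }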


Cluster Policy Bindings
~~~~~~~~~~~~~~~~~~~~~~~

The Senlin API provides the following action APIs for managing the binding
relationship between a cluster and a policy:

- ``policy_attach``: attach a policy to a cluster;
- ``policy_detach``: detach a policy from a cluster;
- ``policy_update``: update the properties of the binding between a cluster
  and a policy.


Attaching a Policy to a Cluster
-------------------------------

Once a policy is attached (bound) to a cluster, it will be enforced when
related actions are performed on that cluster, unless the policy is
(temporarily) disabled on the cluster.

When attaching a policy to a cluster, the following properties can be
specified:

- ``enabled``: a boolean indicating whether the policy should be enabled on
  the cluster once attached. The default is True. When specified, it
  overrides the default setting of the policy.

Upon receiving the ``policy_attach`` request, the Senlin engine performs some
validations, then translates the request into a ``CLUSTER_ATTACH_POLICY``
action and queues the action for execution. The action's UUID is then returned
to the Senlin API and finally to the requester.

When the engine executes the action, it tries to find out whether the policy
is already attached to the cluster. This check is not done earlier because the
engine must ensure that the cluster has been locked before the check, or else
there might be race conditions.

The engine calls the policy's ``attach`` method when attaching the policy and
records the binding in the database if the ``attach`` method returns a
positive response.

Currently, Senlin does not allow two policies of the same type to be attached
to the same cluster. This constraint may be relaxed in the future, but for now
it is checked and enforced before a policy gets attached to a cluster.

Policies attached to a cluster are cached at the target cluster as part of its
runtime ``rt`` data structure. This is an optimization regarding DB queries.


Detaching a Policy from a Cluster
---------------------------------

Once a policy is attached to a cluster, it can be detached from the cluster at
the user's request. The only parameter required for the ``policy_detach``
action API is ``policy_id``, which can be the UUID, name or short ID of the
policy.

Upon receiving a ``policy_detach`` request, the Senlin engine performs some
validations, then translates the request into a ``CLUSTER_DETACH_POLICY``
action and queues the action for execution. The action's UUID is then returned
to the Senlin API and finally to the requester.

When the Senlin engine executes the ``CLUSTER_DETACH_POLICY`` action, it tries
to find out whether the policy is actually attached to the cluster. This check
is not done earlier because the engine must ensure that the cluster has been
locked before the check, or else there might be race conditions.

The engine calls the policy's ``detach`` method when detaching the policy from
the cluster and then removes the binding record from the database if the
``detach`` method returns a True value.

Policies attached to a cluster are cached at the target cluster as part of its
runtime ``rt`` data structure. This is an optimization regarding DB queries.
The ``CLUSTER_DETACH_POLICY`` action will invalidate the cache when detaching
a policy from a cluster.
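
For reference, the attach and detach request bodies are small; sketches follow
(the policy identifier is illustrative, and the exact body layout is an
assumption based on the parameters described above):

::

    {
      "policy_attach": {
        "policy_id": "web-lb-policy",
        "enabled": true
      }
    }

::

    {
      "policy_detach": {
        "policy_id": "web-lb-policy"
      }
    }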

Updating a Policy on a Cluster
------------------------------

When a policy is attached to a cluster, there are some properties pertaining
to the binding. These properties can be updated as long as the policy is still
attached to the cluster. The properties that can be updated include:

- ``enabled``: a boolean value indicating whether the policy should be enabled
  or disabled. There are cases where some policies have to be temporarily
  disabled while other manual operations are going on.

Upon receiving the ``policy_update`` request, the Senlin API performs some
basic validations on the parameters passed.

The Senlin engine translates the ``policy_update`` request into a
``CLUSTER_UPDATE_POLICY`` action and queues it for execution. The UUID of the
action is then returned to the Senlin API and eventually to the requester.

During execution of the ``CLUSTER_UPDATE_POLICY`` action, the Senlin engine
simply updates the binding record in the database and returns.

diff --git a/doc/source/contributor/event_dispatcher.rst b/doc/source/contributor/event_dispatcher.rst
deleted file mode 100644
index b362bb0b7..000000000
--- a/doc/source/contributor/event_dispatcher.rst
+++ /dev/null
@@ -1,125 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


=================
Event Dispatchers
=================

An event :term:`dispatcher` is a processor that converts a given action in the
Senlin engine into a certain format and then persists it into some storage or
sends it to downstream processing software.

Since version 3.0.0, Senlin comes with some built-in dispatchers that can dump
event records into the database and/or send event notifications via the
default message queue. The former is referred to as the ``database``
dispatcher, which is enabled by default; the latter is referred to as the
``message`` dispatcher, which has to be manually enabled by adding the
following line to the ``senlin.conf`` file::

    event_dispatchers = message

However, distributors or users can always add their own event dispatchers
easily when needed.

Event dispatchers are managed as Senlin plugins. Once a new event dispatcher
is implemented, a deployer can enable it by first adding a new item to the
``senlin.dispatchers`` entries in the ``entry_points`` section of the
``setup.cfg`` file, followed by a reinstall of the Senlin service, e.g. via a
``sudo pip install`` command.


The Base Class ``EventBackend``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

All event dispatchers are expected to subclass the base class ``EventBackend``
in the ``senlin.events.base`` module. The only requirement for a dispatcher
subclass is to override the ``dump()`` method that implements the processing
logic.


Providing New Dispatchers
~~~~~~~~~~~~~~~~~~~~~~~~~

Developing A New Event Dispatcher
---------------------------------

The first step for adding a new dispatcher is to create a new file containing
a subclass of ``EventBackend``. In this new class, say ``JsonDispatcher``, you
will need to implement the ``dump()`` class method as exemplified below:
.. code-block:: python

    class JsonDispatcher(base.EventBackend):
        """Dispatcher for dumping events to a JSON file."""

        @classmethod
        def dump(cls, level, action, **kwargs):
            # Your logic goes here
            ...

The ``level`` parameter for the method is identical to that defined by the
``logging`` module of Python. It is an integer representing the criticality of
an event. The ``action`` parameter is an instance of the Senlin action class,
which is defined in the ``senlin.engine.actions.base`` module. There are
virtually no constraints on which properties you pick or how you want to
process them.

Finally, the ``**kwargs`` parameter may provide some useful fields for you to
use:

* ``timestamp``: a datetime value that indicates when the event was generated.
* ``phase``: a string value indicating the phase an action is in. Most of the
  time this can be safely ignored.
* ``reason``: there are some rare cases where an event comes with a textual
  description. Most of the time, this is empty.
* ``extra``: there are even rarer cases where an event comes with some
  additional fields for attention. This can be safely ignored most of the
  time.


Registering the New Dispatcher
------------------------------

For the Senlin service to be aware of, and thus make use of, the new
dispatcher, you will need to register it with the Senlin engine service. This
is done by editing the ``setup.cfg`` file in the root directory of the code
base, for example (the placeholder for the new entry is reconstructed; fill in
your own module path and class):

::

    [entry_points]
    senlin.dispatchers =
        database = senlin.events.database:DBEvent
        message = senlin.events.message:MessageEvent
        jsonfile = <your.module.path>:JsonDispatcher

Finally, save that file and reinstall the Senlin service, followed by a
restart of the ``senlin-engine`` process.

::

    $ sudo pip install -e .


Dynamically Enabling/Disabling a Dispatcher
-------------------------------------------

All dispatchers are loaded when the Senlin engine is started; however, they
can be dynamically enabled or disabled by editing the ``senlin.conf`` file.
The option ``event_dispatchers`` in the ``[DEFAULT]`` section is a
multi-string option for this purpose. To enable your dispatcher (i.e.
``jsonfile``), you will need to add the following line to the ``senlin.conf``
file:

::

    event_dispatchers = jsonfile

diff --git a/doc/source/contributor/node.rst b/doc/source/contributor/node.rst
deleted file mode 100644
index cc8d52a73..000000000
--- a/doc/source/contributor/node.rst
+++ /dev/null
@@ -1,202 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.

=====
Nodes
=====

A node is a logical entity managed by the Senlin service. Each node can belong
to at most one cluster. A node that does not belong to any cluster is referred
to as an "orphan" node.


Node Properties
~~~~~~~~~~~~~~~

There are some common properties that are defined for all nodes. The following
properties are always available on a node:

- ``profile_id``: ID of the profile from which the node is created.
- ``cluster_id``: When a node is a member of a cluster, the ``cluster_id``
  value indicates the ID of the owning cluster. For an orphan node, this
  property is empty.
- ``name``: The name of a node doesn't have to be unique, even in the scope of
  the owning cluster (if there is one). For nodes created by the Senlin
  service upon policy enforcement or when performing certain actions, the
  Senlin engine generates names for them automatically.
- ``index``: Each node has an ``index`` value which is unique in the scope of
  its owning cluster. The value is used to uniquely identify the node inside a
  cluster. For orphan nodes, the ``index`` value is -1.
- ``role``: Each node in a cluster may have a role to play. The value of this
  property is a string that specifies the role a node plays in the owning
  cluster. Each profile type may support a different set of roles.
- ``user``: ID of the user who is the creator (owner) of the node.
- ``project``: ID of the Keystone project in which the node was created.
- ``domain``: ID of the Keystone domain in which the node was created.
- ``init_at``: The timestamp when the node object was initialized.
- ``created_at``: The timestamp when the node was created.
- ``updated_at``: The timestamp when the node was last updated.
- ``metadata``: A list of key-value pairs that are associated with the node.
- ``physical_id``: The UUID of the physical object that backs this node. The
  property value is empty if there is no physical object associated with it.
- ``status``: A string indicating the current status of the node.
- ``status_reason``: A string describing the reason why the node transitioned
  to its current status.
- ``dependents``: A dict containing dependency information between a nova
  server/heat stack node and a container node. The container node's ID will be
  stored in the ``dependents`` property of its host node.

In addition to the above properties, when a node is retrieved and shown to the
user, Senlin provides a pseudo-property named ``profile_name`` for the user's
convenience.


Cluster Membership
~~~~~~~~~~~~~~~~~~

A prerequisite for a node to become a member of a cluster is that the node
must share the same profile type with the cluster. When adding nodes to an
existing cluster, the Senlin engine checks whether the profile types actually
match.

It is *NOT* treated as an error that a node has a different profile
(identified by the profile object's ID) from the cluster. The profile
referenced by the cluster can be interpreted as the 'desired' profile, while
the profiles referenced by individual nodes can be treated as the 'actual'
profile(s). When the cluster scales out, new nodes use the 'desired' profile
referenced by the cluster. When existing nodes are added to an existing
cluster, they may have different profile IDs from the cluster. In this case,
Senlin does not force an unnecessary profile update onto the nodes.


Creating A Node
~~~~~~~~~~~~~~~

When receiving a request to create a node, the Senlin API checks whether any
required fields are missing and whether there are invalid values specified for
some fields. The following fields are required for a node creation request:

- ``name``: name of the node to be created;
- ``profile_id``: ID of the profile to be used for creating the backend
  physical object.

Optionally, the request can contain the following fields:

- ``cluster_id``: When specified, the newly created node will become a member
  of the specified cluster; otherwise, the new node will be an orphan node.
  The ``cluster_id`` provided can be the name, UUID or short ID of a cluster.
- ``role``: A string value specifying the role the node will play inside the
  cluster.
- ``metadata``: A list of key-value pairs to be associated with the node.
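
A node-creation request body consistent with the fields above might look like
the following sketch (values are illustrative, and the ``node`` wrapper is an
assumption mirroring the ``cluster`` map used for cluster creation):

::

    {
      "node": {
        "name": "web-node-01",
        "profile_id": "web-server-profile",
        "cluster_id": "web-cluster",
        "role": "worker",
        "metadata": {"owner": "team-a"}
      }
    }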

Listing Nodes
~~~~~~~~~~~~~

Nodes in the current project can be queried/listed using some query
parameters. None of these parameters is required. By default, the Senlin API
will return all nodes that are not deleted.

When listing nodes, the following query parameters can be specified,
individually or combined:

- ``filters``: a map containing key-value pairs that are used for matching
  node records. Records that fail to match the criteria will be filtered out.
  The following strings are valid as filter keys:

  * ``name``: name of nodes to list; can be a string or a list of strings;
  * ``status``: status of nodes; can be a string or a list of strings;

- ``cluster_id``: a string specifying the name, UUID or short ID of a cluster
  for which the nodes are to be listed.
- ``limit``: a number that restricts the maximum number of records to be
  returned from the query. It is useful for displaying the records in pages
  where the page size can be specified as the limit.
- ``marker``: a string that represents the last seen UUID of nodes in previous
  queries. The query will only return results appearing after the specified
  UUID. This is useful for displaying records in pages.
- ``sort``: a string to enforce sorting of the results. It accepts a list of
  known property names of a node as sorting keys separated by commas. Each
  sorting key can optionally have either ``:asc`` or ``:desc`` appended to the
  key for controlling the sorting direction.
- ``show_nested``: a boolean indicating whether nested clusters should be
  included in the results. The default is True. This feature is yet to be
  supported.
- ``global_project``: a boolean indicating whether node listing should be done
  in a tenant-safe way. When this value is specified as False (the default),
  only nodes from the current project that match the other criteria will be
  returned. When this value is specified as True, nodes matching all other
  criteria will be returned, regardless of the project in which a node was
  created. Only a user with admin privilege is permitted to do a global
  listing.


Getting a Node
~~~~~~~~~~~~~~

When a user wants to check the details of a specific node, he or she can
specify one of the following values for the query:

- node UUID: the query is performed strictly based on the UUID value given.
  This is the most precise query supported.
- node name: Senlin allows multiple nodes to have the same name. It is the
  user's responsibility to avoid name conflicts if needed. The output is the
  details of a node if the node name is unique; otherwise Senlin returns a
  message telling the user that multiple nodes were found matching the name.
- short ID: considering that a UUID is a long string that is not so convenient
  to input, Senlin supports a short version of UUIDs for queries. The Senlin
  engine will use the provided string as a prefix to attempt a match in the
  database. When the "ID" is long enough to be unique, the details of the
  matching node are returned; otherwise Senlin returns an error message
  indicating that multiple nodes were found matching the specified short ID.

The Senlin engine service will try the above three ways, in order, to find a
match in the database.

In addition to the key for the query, a user can provide an extra boolean
option named ``show_details``. When this option is set, the Senlin service
retrieves the properties of the physical object that backs the node. For
example, for a Nova server, this information contains the IP address allocated
to the server, along with other useful information.

In the returned result, Senlin injects the name of the profile used by the
node for the user's convenience.


Updating a Node
~~~~~~~~~~~~~~~

Some node properties are updatable after the node has been created. These
properties include:

- ``name``: name of the node as seen by the user;
- ``role``: the role that is played by the node in its owning cluster;
- ``metadata``: the key-value pairs attached to the node;
- ``profile_id``: the ID of the profile used by the node.

Note that an update of ``profile_id`` differs from updates of the other
properties in that it may take time to complete. When receiving a request to
update the profile used by a node, the Senlin engine creates an Action that is
executed asynchronously by a worker thread.

When validating a node update request, Senlin rejects requests that attempt to
change the profile type used by the node.


Deleting a Node
~~~~~~~~~~~~~~~

A node can be deleted regardless of whether it is a member of a cluster. Node
deletion is handled asynchronously in Senlin. When the Senlin engine receives
a request, it creates an Action to be executed by a worker thread.

diff --git a/doc/source/contributor/osprofiler.rst b/doc/source/contributor/osprofiler.rst
deleted file mode 100644
index e5a67143a..000000000
--- a/doc/source/contributor/osprofiler.rst
+++ /dev/null
@@ -1,68 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.

==========
OSProfiler
==========

OSProfiler provides a tiny but powerful library that is used by most (soon to
be all) OpenStack projects and their Python clients. It provides the ability
to generate one trace per request that goes through all involved services.
This trace can then be extracted and used to build a tree of calls, which can
be quite handy for a variety of reasons (for example, isolating cross-project
performance issues).

More about OSProfiler:
https://docs.openstack.org/osprofiler/latest/

Senlin supports using OSProfiler to trace the performance of its key internal
processing paths, including the RESTful API, RPC, cluster actions, node
actions, DB operations, etc.

Enabling OSProfiler
~~~~~~~~~~~~~~~~~~~

To configure DevStack to enable OSProfiler, edit the
``${DEVSTACK_DIR}/local.conf`` file and add::

    enable_plugin panko https://git.openstack.org/openstack/panko
    enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
    enable_plugin osprofiler https://git.openstack.org/openstack/osprofiler

to the ``[[local|localrc]]`` section.

.. note:: The order of enabling plugins matters.

Using OSProfiler
~~~~~~~~~~~~~~~~

After you have successfully deployed your development environment, the
following profiler configuration will be added to ``senlin.conf``
automatically::

    [profiler]
    enabled = true
    trace_sqlalchemy = true
    hmac_keys = SECRET_KEY

``hmac_keys`` is the secret key(s) used for encrypting the context data for
performance profiling. The default value is 'SECRET_KEY'; you can change it to
any random string(s).

Run any command with ``--os-profile SECRET_KEY``::

    $ openstack --os-profile SECRET_KEY cluster profile list
    # it will print the trace ID

Get pretty HTML with traces::

    $ osprofiler trace show --html <trace ID>

diff --git a/doc/source/contributor/plugin_guide.rst b/doc/source/contributor/plugin_guide.rst
deleted file mode 100644
index dfaa969d9..000000000
--- a/doc/source/contributor/plugin_guide.rst
+++ /dev/null
@@ -1,29 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


=====================
Plugin Writer's Guide
=====================

Senlin provides an open design where developers can incorporate new profile or
policy implementations for different purposes. The following documents
describe how to develop and plug in your own profile types and/or policy
types.


.. toctree::
   :maxdepth: 1

   policy_type
   profile_type
   event_dispatcher

diff --git a/doc/source/contributor/policies/affinity_v1.rst b/doc/source/contributor/policies/affinity_v1.rst
deleted file mode 100644
index 71fa758e1..000000000
--- a/doc/source/contributor/policies/affinity_v1.rst
+++ /dev/null
@@ -1,231 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


====================
Affinity Policy V1.0
====================

This policy is designed for Senlin clusters to exploit the *servergroup* API
exposed by the Nova compute service. The basic policy has been extended to
work with the vSphere hypervisor when the VMware DRS feature is enabled.
However, such an extension is only applicable to *admin*-owned server
clusters.

.. schemaspec::
    :package: senlin.policies.affinity_policy.AffinityPolicy


Actions Handled
~~~~~~~~~~~~~~~

The policy is capable of handling the following actions:

- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
  named ``count`` in its ``inputs``.

- ``CLUSTER_RESIZE``: an action that carries various input parameters to
  resize a cluster. The policy will try to parse the raw inputs if no other
  policies have done so.

- ``NODE_CREATE``: an action originated from a node creation RPC request.
  The policy is capable of processing the node associated with this action.

The policy will be checked **BEFORE** any of the above mentioned actions is
executed. When the action is ``CLUSTER_RESIZE``, the affinity policy checks
whether it is about the creation of new nodes. If the resize request is about
the removal of existing nodes, the policy won't block the request.

The Senlin engine respects outputs (i.e. the number of nodes to create) from
other policies, if any. If no such data exists, it then checks the
user-provided ``count`` input, if there is one. The policy is also designed to
parse a cluster resize request and see whether there are new nodes to be
created.

After validating the ``count`` value, the affinity policy proceeds to update
the ``data`` property of the action with node placement data. For example:

::

    {
      'placement': {
        'count': 2,
        'placements': [
          {'servergroup': 'XYZ-ABCD'},
          {'servergroup': 'XYZ-ABCD'}
        ]
      }
    }


Scenarios
~~~~~~~~~

S1: Inheriting Server Group from Profile
----------------------------------------

When attaching the affinity policy to a cluster that is based on a profile
type of ``os.nova.server-1.0``, if the profile contains a ``scheduler_hints``
property and the property value (a collection) has a ``group`` key, the engine
will use the value of the ``group`` key as a Nova server group name. In this
case, the affinity policy checks whether the specified server group actually
exists. If the group doesn't exist, or the rules specified in the group don't
match those specified (or implied) by the affinity policy, you will get an
error when attaching the policy to the cluster. If, on the contrary, the group
is found and the rules do match those of the current policy, the engine
records the ID of the server group in the policy binding data. The engine also
saves a key-value pair ``inherited_group: True`` into the policy binding data,
so that in the future the engine knows that the server group wasn't created
from scratch by the affinity policy. This leads to the following data stored
in the policy binding data:

::

    {
      'AffinityPolicy': {
        'version': 1.0,
        'data': {
          'servergroup_id': 'XYZ-ABCD',
          'inherited_group': True
        }
      }
    }

When an affinity policy is to be detached from a cluster, the Senlin engine
will check and learn that the server group was not created by the affinity
policy. The engine will not delete the server group.

Before any of the targeted actions is executed, the affinity policy gets a
chance to be checked. It does so by looking into the policy binding data to
find out the server group ID to use. For node creation requests, the policy
will yield some data into the ``action.data`` property that looks like:

::

    {
      'placement': {
        'count': 2,
        'placements': [
          {'servergroup': 'XYZ-ABCD'},
          {'servergroup': 'XYZ-ABCD'}
        ]
      }
    }
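
For context, an affinity policy specification driving the behavior in these
scenarios might look roughly like the sketch below. The property names follow
the affinity policy schema referenced above, but treat the exact spelling and
set of properties as assumptions:

::

    type: senlin.policy.affinity
    version: 1.0
    properties:
      servergroup:
        name: my_server_group
        policies: anti-affinity
      enable_drs_extension: false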

S2: Creating A Server Group when Needed
---------------------------------------

When attaching an affinity policy to a cluster, if the cluster profile doesn't
contain a ``scheduler_hints`` property or there is no ``group`` value
specified in the ``scheduler_hints`` property, the engine creates a new server
group by invoking the Nova API, providing it the policies specified (or
implied) as inputs. The ID of the newly created server group is then saved
into the policy binding data, along with an ``inherited_group: False``
key-value pair. For example:

::

    {
      'AffinityPolicy': {
        'version': 1.0,
        'data': {
          'servergroup_id': 'XYZ-ABCD',
          'inherited_group': False
        }
      }
    }

When such a policy is later detached from the cluster, the Senlin engine will
check and learn that the server group should be deleted. It then deletes the
server group by invoking the Nova API.

When the targeted actions are about to be executed, the protocol for checking
and data saving is identical to that outlined in scenario *S1*.


S3: Enabling vSphere DRS Extensions
-----------------------------------

When you have vSphere hosts (with the DRS feature enabled) serving hypervisors
to Nova, a vSphere host is itself a collection of physical nodes. To make
better use of the vSphere DRS feature, you can enable the DRS extension by
specifying ``enable_drs_extension: True`` in your affinity policy.

When attaching and detaching the affinity policy to/from a cluster, the engine
operations are the same as described in scenarios *S1* and *S2*. However, when
one of the targeted actions is triggered, the affinity policy will first check
whether the ``availability_zone`` property is set, and it will use ``nova`` as
the default value if it is not specified.

The engine then continues to check the input parameters (as outlined above) to
find out the number of nodes to create. It also checks the server group ID to
use by looking into the policy binding data.

After the policy has collected all the inputs it needs, it proceeds to check
the available vSphere hypervisors with DRS enabled. It does so by looking into
the ``hypervisor_hostname`` property of each hypervisor reported by Nova
(**note**: retrieving the hypervisor list is an admin-only API, which is the
reason the vSphere extension is only applicable to admin-owned clusters). The
policy attempts to find a hypervisor whose host name contains ``drs``. If it
fails to find such a hypervisor, the policy check fails with the action's
``data`` field set to:

::

    {
      'status': 'ERROR',
      'status_reason': 'No suitable vSphere host is available.'
    }

The affinity policy uses the first matching hypervisor as the target host and
forms a string containing the availability zone name and the hypervisor host
name, e.g. ``nova:vsphere_drs_1``. This string will later be used as the
availability zone name sent to Nova. For example, the following is a sample
result of applying the affinity policy to a cluster with vSphere DRS enabled:

::

    {
      'placement': {
        'count': 2,
        'placements': [{
          'zone': 'nova:vsphere_drs_1',
          'servergroup': 'XYZ-ABCD'
        }, {
          'zone': 'nova:vsphere_drs_1',
          'servergroup': 'XYZ-ABCD'
        }]
      }
    }

**NOTE**: The ``availability_zone`` property is effective even when the
vSphere DRS extension is not enabled. When ``availability_zone`` is explicitly
specified, the affinity policy will pass it along with the server group ID to
the Senlin engine for further processing, e.g.:

::

    {
      'placement': {
        'count': 2,
        'placements': [{
          'zone': 'nova_1',
          'servergroup': 'XYZ-ABCD'
        }, {
          'zone': 'nova_1',
          'servergroup': 'XYZ-ABCD'
        }]
      }
    }

diff --git a/doc/source/contributor/policies/deletion_v1.rst b/doc/source/contributor/policies/deletion_v1.rst
deleted file mode 100644
index 46f4629fd..000000000
--- a/doc/source/contributor/policies/deletion_v1.rst
+++ /dev/null
@@ -1,272 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License.
  You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


====================
Deletion Policy V1.1
====================

The deletion policy is designed to be enforced when a cluster's size is to be
shrunk.

.. schemaspec::
    :package: senlin.policies.deletion_policy.DeletionPolicy


Actions Handled
~~~~~~~~~~~~~~~

The policy is capable of handling the following actions:

- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value named
  ``count`` in its ``inputs``.

- ``CLUSTER_DEL_NODES``: an action that carries a list value named
  ``candidates`` in its ``inputs`` value.

- ``CLUSTER_RESIZE``: an action that carries various key-value pairs as
  arguments to the action in its ``inputs`` value.

- ``NODE_DELETE``: an action that has a node associated with it. This action
  has to originate directly from an RPC request so that it will be processed
  by the deletion policy. The node ID associated with the action naturally
  becomes the 'candidate' node for deletion.

The policy will be checked **BEFORE** any of the above mentioned actions is
executed.


Scenarios
~~~~~~~~~

Under different scenarios, the policy works by checking different properties
of the action.


S1: ``CLUSTER_DEL_NODES``
-------------------------

This is the simplest case. An action of ``CLUSTER_DEL_NODES`` carries a list
of UUIDs of the nodes to be removed from the cluster. The deletion policy
steps in before the actual deletion happens, to help determine the following
details:

- whether the nodes should be destroyed after being removed from the cluster;
- whether the nodes should be granted a grace period before being destroyed;
- whether the ``desired_capacity`` of the cluster in question should be
  reduced after the node removal.

After the policy check, the ``data`` field is updated with contents similar to
the following example (the candidate node IDs below are placeholders,
reconstructed from the analogous examples in later scenarios):

::

    {
      "status": "OK",
      "reason": "Candidates generated",
      "deletion": {
        "count": 2,
        "candidates": ["<node-id-1>", "<node-id-2>"],
        "destroy_after_deletion": true,
        "grace_period": 0
      }
    }


S2: ``CLUSTER_SCALE_IN`` without Scaling Policy
-----------------------------------------------

If there is no :doc:`scaling policy <scaling_v1>` attached to the cluster, the
Senlin engine takes the liberty to assume that the expectation is to remove 1
node from the cluster. This is equivalent to the case when ``count`` is
specified as ``1``.

The policy then continues to evaluate the cluster nodes to select ``count``
victim node(s) based on the ``criteria`` property of the policy. Finally, it
updates the action's ``data`` field with the list of node candidates along
with other properties, as described in scenario **S1**.


S3: ``CLUSTER_SCALE_IN`` with Scaling Policy
--------------------------------------------

If there is a :doc:`scaling policy <scaling_v1>` attached to the cluster, that
policy will yield into the action's ``data`` property some contents similar to
the following example:

::

    {
      "deletion": {
        "count": 2
      }
    }

The Senlin engine uses the value of the ``deletion.count`` field in the
``data`` property as the number of nodes to remove from the cluster. It
selects victim nodes from the cluster based on the ``criteria`` specified and
then updates the action's ``data`` property along with other properties, as
described in scenario **S1**.
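
For reference, a deletion policy specification that produces decisions like
those above might look roughly like this sketch; the property names follow the
deletion policy schema referenced earlier, but treat the exact set of
properties and the ``criteria`` value as assumptions:

::

    type: senlin.policy.deletion
    version: 1.1
    properties:
      criteria: OLDEST_FIRST
      destroy_after_deletion: true
      grace_period: 60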


S4: ``CLUSTER_RESIZE`` without Scaling Policy
---------------------------------------------

If there is no :doc:`scaling policy <scaling_v1>` attached to the cluster, the
deletion policy won't be able to find a ``deletion.count`` field in the
action's ``data`` property. It then checks the ``inputs`` property of the
action directly and generates a ``deletion.count`` field if the request turns
out to be a scale-in operation. If the request is not a scale-in operation,
the policy check aborts immediately.

After having determined the number of nodes to remove, the policy proceeds to
select victim nodes based on its ``criteria`` property value. Finally, it
updates the action's ``data`` field with the list of node candidates along
with other properties, as described in scenario **S1**.


S5: ``CLUSTER_RESIZE`` with Scaling Policy
------------------------------------------

In the case where there is already a :doc:`scaling policy <scaling_v1>`
attached to the cluster, the scaling policy will be evaluated before the
deletion policy, so the deletion policy works in the same way as described in
scenario **S3**.


S6: Deletion across Multiple Availability Zones
-----------------------------------------------

When you have a :doc:`zone placement policy <zone_v1>` attached to a cluster,
the zone placement policy decides in which availability zone(s) new nodes will
be placed and from which availability zone(s) old nodes should be deleted to
maintain the expected node distribution. Such a zone placement policy will be
evaluated before this deletion policy, according to its built-in priority
value.

When scaling in a cluster, a zone placement policy yields a decision into the
action's ``data`` property that looks like:

::

    {
      "deletion": {
        "count": 3,
        "zones": {
          "AZ-1": 2,
          "AZ-2": 1
        }
      }
    }

The above data indicate how many nodes should be deleted globally and how many
nodes should be removed from each availability zone. The deletion policy then
evaluates nodes from each availability zone to select the specified number of
nodes as candidates. This selection process is also based on the ``criteria``
property of the deletion policy.

After the evaluation, the deletion policy completes by modifying the ``data``
property to something like:

::

    {
      "status": "OK",
      "reason": "Candidates generated",
      "deletion": {
        "count": 3,
        "candidates": ["node-id-1", "node-id-2", "node-id-3"],
        "destroy_after_deletion": true,
        "grace_period": 0
      }
    }

In the ``deletion.candidates`` list, two of the nodes are from availability
zone ``AZ-1``, and one of the nodes is from availability zone ``AZ-2``.

S7: Deletion across Multiple Regions
------------------------------------

When you have a :doc:`region placement policy <region_v1>` attached to a
cluster, the region placement policy decides in which region(s) new nodes will
be placed and from which region(s) old nodes should be deleted to maintain the
expected node distribution. Such a region placement policy will be evaluated
before this deletion policy, according to its built-in priority value.

When scaling in a cluster, a region placement policy yields a decision into
the action's ``data`` property that looks like:

::

    {
      "deletion": {
        "count": 3,
        "region": {
          "R-1": 2,
          "R-2": 1
        }
      }
    }

The above data indicate how many nodes should be deleted globally and how many
nodes should be removed from each region.
The deletion policy then evaluates nodes from each region to select the
specified number of nodes as candidates. This selection process is also based
on the ``criteria`` property of the deletion policy.

After the evaluation, the deletion policy completes by modifying the ``data``
property to something like:

::

  {
    "status": "OK",
    "reason": "Candidates generated",
    "deletion": {
      "count": 3,
      "candidates": ["node-id-1", "node-id-2", "node-id-3"],
      "destroy_after_deletion": true,
      "grace_period": 0
    }
  }

In the ``deletion.candidates`` list, two of the nodes are from region ``R-1``
and one is from region ``R-2``.


S8: Handling ``NODE_DELETE`` Action
-----------------------------------

If the action that triggered the policy checking is a ``NODE_DELETE`` action,
the action has an associated node as its property. When the deletion policy
has detected this action type, it will copy the policy specification values
into the action's ``data`` field, even though the ``count`` and ``candidates``
values are obvious in this case. For example:

::

  {
    "status": "OK",
    "reason": "Candidates generated",
    "deletion": {
      "count": 1,
      "candidates": ["node-id-1"],
      "destroy_after_deletion": true,
      "grace_period": 0
    }
  }

diff --git a/doc/source/contributor/policies/health_v1.rst b/doc/source/contributor/policies/health_v1.rst
deleted file mode 100644
index 64516a038..000000000
--- a/doc/source/contributor/policies/health_v1.rst
+++ /dev/null
@@ -1,373 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


==================
Health Policy V1.1
==================

The health policy is designed to automate the failure detection and recovery
process for a cluster.

.. schemaspec::
   :package: senlin.policies.health_policy.HealthPolicy


Actions Handled
~~~~~~~~~~~~~~~

The policy is capable of handling the following actions:

- ``CLUSTER_RECOVER``: an action that carries some optional parameters as its
  inputs. The parameters are specific to the profile type of the target
  cluster.

- ``CLUSTER_DEL_NODES``: an action that carries a list value named
  ``candidates`` in its ``inputs`` value.

- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value named
  ``count`` in its ``inputs`` value.

- ``CLUSTER_RESIZE``: an action that carries various key-value pairs as
  arguments to the action in its ``inputs`` value.

- ``NODE_DELETE``: an action that has a node associated with it. This action
  must originate directly from an RPC request so that it can be processed by
  the health policy.

The policy will be checked **BEFORE** a ``CLUSTER_RECOVER`` action is executed.
It will derive the appropriate inputs to the action based on the policy's
properties.

The policy will be checked **BEFORE** and **AFTER** any one of the
``CLUSTER_DEL_NODES``, ``CLUSTER_SCALE_IN``, ``CLUSTER_RESIZE`` and
``NODE_DELETE`` actions is executed.
When any of these actions
originates from an RPC request, Senlin is aware of the fact that
a cluster is losing node member(s) because of a normal cluster membership
management operation initiated by users rather than unexpected node failures.
The health policy will temporarily disable the *health manager* function on
the cluster in question and re-enable health management after the action
has completed.

The health policy can be treated as an interface for the *health manager*
engine running inside the ``senlin-engine`` process. Its specification
contains two main "sections", ``detection`` and ``recovery``, which specify
how to detect node failures and how to recover a node to a healthy status,
respectively.


Failure Detection
~~~~~~~~~~~~~~~~~

The health policy is designed to be flexible regarding node failure detection.
The current vision is that the health policy will support the following types
of failure detection:

* ``NODE_STATUS_POLLING``: the *health manager* periodically polls a cluster
  and checks whether any nodes are inactive.

* ``NODE_STATUS_POLL_URL``: the *health manager* periodically polls a URL
  and checks whether a node is considered healthy based on the response.

* ``LIFECYCLE_EVENTS``: the *health manager* listens to event notifications
  sent by the backend service (e.g. nova-compute).

* ``LB_STATUS_POLLING``: the *health manager* periodically polls the load
  balancer (if any) and checks whether any node has gone offline.

The third option above (``LB_STATUS_POLLING``) is not usable yet due to an
outstanding issue in the LBaaS service. But we are still tracking its progress
considering that metrics from the load balancer are more trustworthy and more
useful because they originate from the data plane rather than the control
plane.

Yet another option regarding load-balancer based health detection is to have
the load balancer emit event notifications when node status changes. This is
also ongoing work that may take some time to land.


Proactive Node Status Polling
-----------------------------

The most straightforward way of node failure detection is checking with the
backend service about the status of the physical resource represented by a
node. If the ``type`` of ``detection`` is set to "``NODE_STATUS_POLLING``"
(optionally, with an ``interval`` value specified), the *health manager* will
periodically check the resource status by querying the backend service to see
whether the resource is active. Below is a sample configuration::

  type: senlin.policy.health
  version: 1.1
  properties:
    detection:
      interval: 120
      detection_modes:
        - type: NODE_STATUS_POLLING
    ...

Once such a policy object is attached to a cluster, Senlin registers the
cluster to the *health manager* engine for failure detection, i.e., node
health checking. A thread is created to periodically call Nova to check the
status of the node. If the server status is ``ERROR``, ``SHUTOFF`` or
``DELETED``, the node is considered unhealthy.

When one of the ``senlin-engine`` services is restarted, a new *health manager*
engine will be launched. This new engine will check the database to see if
there are clusters which have health policies attached and thus had their
health status maintained by a *health manager* that is no longer alive. The
new *health manager* will pick up these clusters for health management.
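
The polling loop just described can be pictured in a few lines. The following
is a simplified sketch, not Senlin's actual code: the ``nova`` client handle
and the ``trigger_node_recover`` helper are assumptions made for the example.

.. code:: python

  import time

  UNHEALTHY_STATUSES = {'ERROR', 'SHUTOFF', 'DELETED'}

  def poll_cluster_health(nova, cluster, interval=120):
      """Periodically poll node status and flag failed nodes."""
      while True:
          for node in cluster.nodes:
              server = nova.servers.get(node.physical_id)
              if server.status in UNHEALTHY_STATUSES:
                  # Hand the failed node over to the recovery logic.
                  trigger_node_recover(node)  # hypothetical helper
          time.sleep(interval)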

Polling Node URL
----------------

The health check for a node can also be configured to periodically query a
URL with the ``NODE_STATUS_POLL_URL`` detection type. The URL can optionally
contain expansion parameters. Expansion parameters are strings enclosed in {}
that will be substituted with the node-specific value by Senlin prior to
querying the URL. The only valid expansion parameter at this point is
``{nodename}``. This expansion parameter will be replaced with the name of the
Senlin node. Below is a sample configuration::

  type: senlin.policy.health
  version: 1.1
  properties:
    detection:
      interval: 120
      detection_modes:
        - type: NODE_STATUS_POLL_URL
          options:
            poll_url: "http://{nodename}/healthstatus"
            poll_url_healthy_response: "passing"
            poll_url_conn_error_as_unhealthy: true
            poll_url_retry_limit: 3
            poll_url_retry_interval: 2
    ...


.. note::
   ``{nodename}`` can be used to query a URL implemented by an
   application running on each node. This requires that the OpenStack cloud
   is set up to automatically register the name of new server instances with
   the DNS service. In the future, support for a new expansion parameter for
   node IP addresses may be added.

Once such a policy object is attached to a cluster, Senlin registers the
cluster to the *health manager* engine for failure detection, i.e., node
health checking. A thread is created to periodically make a GET request on the
specified URL. ``poll_url_conn_error_as_unhealthy`` specifies the behavior if
the URL is unreachable. A node is considered healthy if the response to the GET
request includes the string specified by ``poll_url_healthy_response``. If it
does not, Senlin will retry the URL health check for the number of times
specified by ``poll_url_retry_limit``, waiting the number of seconds in
``poll_url_retry_interval`` between each retry. If the URL response still does
not contain the expected string after the retries, the node is considered
unhealthy.


Listening to Event Notifications
--------------------------------

For some profile types (currently ``os.nova.server``), the backend service may
emit an event notification on the control plane message bus. These events are
more economical ways for node failure detection, assuming that all kinds of
status changes will be captured and reported by the backend service. Actually,
we have verified that most lifecycle events related to a VM server are already
captured and reported by Nova. For other profile types such as
``os.heat.stack``, there also exists such a possibility, although to our
knowledge Heat cannot detect all stack failures.

Event listening is a cheaper way for node failure detection when compared to
the status polling approach described above. To instruct the *health manager*
to listen to event notifications, users can attach to their cluster(s) a
health policy that looks like the following example::

  type: senlin.policy.health
  version: 1.1
  properties:
    detection:
      type: LIFECYCLE_EVENTS

    ...

When such a policy is attached to a cluster, Senlin registers the cluster to
the *health manager* engine for failure detection, i.e., node health checking.
A listener thread is created to listen to events that indicate a certain node
has failed. For nova server nodes, the current implementation treats all of
the following event types as indications of node failures:

* ``compute.instance.pause.end``: A server has been accidentally paused.
* ``compute.instance.power_off.end``: A server has been stopped accidentally.
* ``compute.instance.rebuild.error``: A server rebuild has failed.
* ``compute.instance.shutdown.end``: A server has been shut down for unknown
  reasons.
* ``compute.instance.soft_delete.end``: A server has been soft deleted.

When any such event is heard by the listener thread, it will issue
a ``NODE_RECOVER`` RPC request to the senlin-engine service. For the health
policy to make a smarter decision on the proper recovery operation, the RPC
request is augmented with some parameters as hints to the recovery operation,
as exemplified below::

  {
    "event": "SHUTDOWN",
    "state": "shutdown",
    "instance_id": "449ad837-3db2-4aa9-b324-ecd28e14ab14",
    "timestamp": "2016-11-27T12:10:58Z",
    "publisher": "nova-compute:node1"
  }

Ideally, a health management solution can react differently based on the
different types of failures detected. For example, a server stopped by
accident can be simply recovered by starting it again; a paused server can be
unpaused quickly instead of being recreated.

When one of the ``senlin-engine`` services is restarted, a new *health manager*
engine will be launched. This new engine will check the database to see if
there are clusters which have health policies attached and thus had their
health status maintained by a *health manager* that is no longer alive. The
new *health manager* will pick up these clusters for health management.


Recovery Actions
~~~~~~~~~~~~~~~~

The value of the ``actions`` key under ``recovery`` is modeled as a list,
each entry of which specifies an action to try. The list of actions is to be
adjusted by the policy before being passed on to a base ``Profile`` for actual
execution. An example (imaginary) list of actions is shown below::

  type: senlin.policy.health
  version: 1.0
  properties:
    ...
    recovery:
      actions:
        - name: REBOOT
          params:
            type: soft
        - name: REBUILD
        - name: my_evacuation_workflow
          type: MISTRAL_WORKFLOW
          params:
            node_id: {{ node.physicalid }}

The above specification basically tells the Senlin engine to try a list of
recovery actions one by one. The first thing to try is to "reboot" (an
operation only applicable to a Nova server) the failed node in question. If
that does not solve the problem, the engine is expected to "rebuild" (also a
Nova server specific verb) the failed node. If this cannot bring the node back
to a healthy status, the engine should execute a Mistral workflow named
"``my_evacuation_workflow``" and pass in the physical ID of the node.

The health policy is triggered when a ``CLUSTER_RECOVER`` action is to be
executed. Using the above example, the policy object will fill in the ``data``
field of the action object with the following content::

  {
    "health": {
      "recover_action": [
        {
          "name": "REBOOT",
          "params": {
            "type": "soft"
          }
        },
        {
          "name": "REBUILD"
        },
        {
          "name": "my_evacuation_workflow",
          "type": "MISTRAL_WORKFLOW",
          "params": {
            "node_id": "7a753f4b-417d-4c10-8065-681f60db0c9a"
          }
        }
      ]
      ...
    }
  }

This action customization is eventually passed on to the ``Profile`` base
class where the actual actions are performed.

**NOTE**: Currently, we only support a single action in the list. Support
for Mistral workflows is also ongoing work.


Default Recovery Action
-----------------------

Since Senlin is designed to manage different types of resources, each resource
type, i.e.
:term:`Profile Type`, may support different sets of operations that
can be used for failure recovery.

A more practical and more general operation to recover a failed resource is to
delete the old one and then create a new one, i.e. a ``RECREATE``
operation. Note that although the ``RECREATE`` action is generic enough, it
may or may not be what users want. For example, there is no guarantee that a
recreated Nova server will preserve its physical ID or its IP address. The
transient state of the original server is certain to be lost.


Profile-specific Recovery Actions
---------------------------------

Each profile type supports a unique set of operations, some of which are
relevant to failure recovery. For example, while a Nova server may support
many operations that can be used for failure recovery, a Heat stack may
support only the ``STACK_UPDATE`` operation for recovery. The set of actions
that can be specified for recovery is profile-specific, and thus an important
part for the policy to check and validate.


External Recovery Actions
-------------------------

In real-life deployments, there are use cases where a simple recovery of a
node itself is not sufficient to bring back the business services or
applications that were running on those nodes. There are other use cases where
appropriate actions must be taken on the storage and/or network used for a
full failure recovery. These are the triggers for the Senlin team to bring in
support for Mistral workflows as special actions.

The current design is to allow for a mixture of built-in recovery actions and
user-provided workflows. In the foreseeable future, Senlin does not manage the
workflows to be executed and the team has no plan to support debugging of
workflow executions. Users have to make sure their workflows do what they
intend.


Fencing Support
~~~~~~~~~~~~~~~

The term "fencing" is used to describe the operations that make sure a
seemingly failed resource is dead for sure. This is a very important aspect of
all high-availability solutions. Take a Nova server failure as an example:
there are many causes that can drive the server into an inactive status. A
physical host crash, a broken network connection, etc. can all make a node
unreachable. From the Nova controller's perspective, it may appear that the
host has gone offline; however, what really happened could be just that the
management network is experiencing some problems. The host is actually still
there, and all the VM instances on it are still active, which means they are
still processing requests and they are still using the IP addresses allocated
to them by a DHCP server.

There are many such cases where a seemingly inactive node is still working,
and these nodes will bring the whole cluster into an unpredictable status if
we attempt a premature recovery action without considering the possibility
that the nodes are still alive.

Considering this, we are working on modeling and implementing support for
fencing in the health policy.

diff --git a/doc/source/contributor/policies/load_balance_v1.rst b/doc/source/contributor/policies/load_balance_v1.rst
deleted file mode 100644
index ac9d13044..000000000
--- a/doc/source/contributor/policies/load_balance_v1.rst
+++ /dev/null
@@ -1,258 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License.
You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


==========================
Load Balancing Policy V1.1
==========================

This policy is designed to enable Senlin clusters to leverage the Neutron
LBaaS V2 features so that workloads can be distributed across nodes in a
reasonable manner.

.. schemaspec::
   :package: senlin.policies.lb_policy.LoadBalancingPolicy


Actions Handled
~~~~~~~~~~~~~~~

The policy is capable of handling the following actions:

- ``CLUSTER_ADD_NODES``: an action that carries a list of node IDs for the
  nodes (servers) to be added into the cluster.

- ``CLUSTER_DEL_NODES``: an action that carries a list of node IDs for the
  nodes (servers) to be removed from the cluster.

- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value named
  ``count`` in its ``inputs``.

- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
  named ``count`` in its ``inputs``.

- ``CLUSTER_RESIZE``: an action that carries some additional parameters
  specifying the details of the resize request, e.g. ``adjustment_type``,
  ``number`` etc. in its ``inputs``.

- ``NODE_CREATE``: an action originating directly from an RPC request; it has
  a node associated with it.

- ``NODE_DELETE``: an action originating directly from an RPC request; it has
  a node associated with it.

The policy will be checked **AFTER** any of the above-mentioned actions that
adds new member nodes to the cluster is executed. It is also checked
**BEFORE** any of the above actions that removes existing members from the
cluster is executed.


Policy Properties
~~~~~~~~~~~~~~~~~

The load-balancing policy has its properties grouped into three categories:
``pool``, ``vip`` and ``health_monitor``. The ``pool`` property accepts a map
that contains the detailed specification for the load-balancing pool that
contains the nodes as members, such as "``protocol``", "``protocol_port``",
"``subnet``", "``lb_method``" etc. Most of the properties have a default value
except for "``subnet``", which always requires an input.

The ``vip`` property also accepts a map that contains the detailed
specification for the "virtual IP address" visible to the service users.
These include, for example, the "``subnet``", "``address``", "``protocol``"
and "``protocol_port``" values to be associated/assigned to the VIP.

The ``health_monitor`` property accepts a map that specifies the details about
the configuration of the "health monitor" provided by (embedded into) the
load balancer. The map may contain values for keys like "``type``",
"``delay``", "``max_retries``", "``http_method``" etc.

For the detailed specification of this policy, you can use the
:command:`openstack cluster policy type show senlin.policy.loadbalance-1.1`
command.


Load Balancer Management
~~~~~~~~~~~~~~~~~~~~~~~~

When attaching a loadbalance policy to a cluster, the engine will always try
to create a new load balancer and then add the existing nodes to it as
members. If any member node cannot be added to the load balancer, the engine
refuses to attach the policy to the cluster.
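
The attach workflow just described, together with the bookkeeping detailed
below, can be summarized in a short sketch. This is illustrative only and not
Senlin's actual code; the ``lb_driver`` wrapper and its methods are
assumptions made for the example.

.. code:: python

  def attach_lb_policy(lb_driver, cluster):
      """Create a load balancer and register all cluster nodes with it."""
      lb = lb_driver.create_loadbalancer(cluster)  # LB, listener, pool, monitor
      for node in cluster.nodes:
          member_id = lb_driver.add_member(lb, node)
          if member_id is None:
              # A single failure aborts the whole attach operation.
              raise RuntimeError('failed adding node %s' % node.id)
          # Remember the pool membership on each node (see below).
          node.data['lb_member'] = member_id
      # These IDs are saved into the policy binding data (see below).
      return {
          'loadbalancer': lb.loadbalancer_id,
          'listener': lb.listener_id,
          'pool': lb.pool_id,
          'healthmonitor': lb.healthmonitor_id,
      }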

After having successfully added a node to the load balancer, the engine saves
a key-value pair "``lb_member: <member_id>``" into the ``data`` field of the
node. After all existing nodes have been successfully added to the load
balancer, the engine saves the load balancer information into the policy
binding data. The information stored is something like the following example:

::

  {
    "LoadBalancingPolicy": {
      "version": 1.0,
      "data": {
        "loadbalancer": "bb73fa92-324d-47a6-b6ce-556eda651532",
        "listener": "d5f621dd-5f93-4adf-9c76-51bc4ec9f313",
        "pool": "0f58df07-77d6-4aa0-adb1-8ac6977e955f",
        "healthmonitor": "83ebd781-1417-46ac-851b-afa92844252d"
      }
    }
  }

When detaching a loadbalance policy from a cluster, the engine first checks
the information stored in the policy binding data, where it will find the IDs
of the load balancer, the listener, the health monitor etc. It then proceeds
to delete these resources by invoking the LBaaS APIs. If any of the resources
cannot be deleted for some reason, the policy detach request will be
rejected.

After all load balancer resources are removed, the engine will iterate through
all cluster nodes and delete the "``lb_member``" key-value pair stored there.
When all nodes have been virtually detached from the load balancer, the detach
operation returns success.


Scenarios
~~~~~~~~~

S1: ``CLUSTER_SCALE_IN``
------------------------

When scaling in a cluster, there may or may not be a scaling policy attached
to the cluster. The loadbalance policy has to cope with both cases. The
loadbalance policy first attempts to get the number of nodes to remove, then
it tries to get the candidate nodes for removal.

It will first check if there is a "``deletion``" key in the action's ``data``
field. If it finds one, it means other policies have already helped decide
the number of nodes to remove, and possibly even the candidate nodes for
removal. If the "``deletion``" key is not found, the policy has to figure out
the deletion count itself. It first checks if the action has an input named
"``count``". The ``count`` value will be used if found; otherwise the policy
assumes a ``count`` of 1.

When the policy finds that the candidate nodes for removal have not yet been
chosen, it will make a random selection from all cluster nodes.

After the policy has figured out the candidate nodes for removal, it invokes
the LBaaS API to remove the candidates from the load balancer. If any of the
removal operations fails, the scale-in operation fails before node removal
actually happens.

When all candidates have been removed from the load balancer, the scale-in
operation continues to delete the candidate nodes.

S2: ``CLUSTER_DEL_NODES``
-------------------------

When deleting specified nodes from a cluster, the candidate nodes are already
provided in the action's ``inputs`` property, so the loadbalance policy just
iterates over the list of candidate nodes to update the load balancer. The
load balancer side operation is identical to that outlined in scenario *S1*.

S3: ``CLUSTER_RESIZE`` that Shrinks a Cluster
---------------------------------------------

For a cluster resize operation, the loadbalance policy is invoked **BEFORE**
the operation attempts to remove any nodes from the cluster. If there are
other policies (such as a scaling policy or a deletion policy) attached to the
cluster, the number of nodes along with the candidate nodes might have already
been decided.

The policy first checks the "``deletion``" key in the action's ``data`` field.
If it finds one, it means other policies have already helped decide the
number of nodes to remove, and possibly even the candidate nodes for removal.
If the "``deletion``" key is not found, the policy has to figure out
the deletion count itself. In the latter case, the policy will try to parse
the ``inputs`` property of the action and see if it is about to delete nodes
from the cluster. If the action is indeed about removing nodes, then the
policy gets what it wants, i.e. the ``count`` value. If the action is not
about deleting nodes, then the action passes the policy check directly.

After having figured out the number of nodes to delete, the policy may still
need to decide which nodes to remove, i.e. the candidates. When no other
policy has made a decision, the loadbalance policy randomly chooses the
specified number of nodes as candidates.

After the candidates are eventually selected, the policy proceeds to update
the load balancer as outlined in scenario *S1*.

S4: ``CLUSTER_SCALE_OUT``
-------------------------

The policy may be checked **AFTER** a scale-out operation is performed on the
cluster. After new nodes have been created into the cluster, the loadbalance
policy needs to notify the load balancer about the new members added.
When the loadbalance policy is checked, there may or may not be other
policies attached to the cluster, so the policy needs to handle both cases.

It first checks if there is a "``creation``" key in the action's ``data``
field. If the "``creation``" key is not found, it means the operation has
nothing to do with the loadbalance policy. For example, it could be a request
to resize a cluster where the result is the removal of existing nodes instead
of the creation of new ones. In this case, the policy checking aborts
immediately.

When new nodes are created, the operation is expected to have filled the
action's ``data`` field with data that looks like the following example:

::

  {
    "creation": {
      "count": 2,
      "nodes": [
        "4e54e810-6579-4436-a53e-11b18cb92e4c",
        "e730b3d0-056a-4fa3-9b1c-b1e6e8f7d6eb"
      ]
    }
  }

The "``nodes``" field in the ``creation`` map always contains a list of node
IDs for the nodes that have been created. After getting the node IDs, the
policy proceeds to add these nodes to the load balancer (recorded in the
policy binding data) by invoking the LBaaS API. If any update operation to the
load balancer fails, the policy returns with an error message. If a node has
been successfully added to the load balancer, the engine will record the
load balancer IDs in the node's ``data`` field.

S5: ``CLUSTER_ADD_NODES``
-------------------------

When a ``CLUSTER_ADD_NODES`` operation is completed, it will record the IDs of
the nodes into the ``creation`` property of the action's ``data`` field. The
logic to update the load balancer and the logic to update the ``data`` field
of individual nodes are identical to those described in scenario *S4*.

S6: ``CLUSTER_RESIZE`` that Expands a Cluster
---------------------------------------------

When a ``CLUSTER_RESIZE`` operation is completed and the operation results in
some new nodes created and added to the cluster, it will record the IDs of
the nodes into the ``creation`` property of the action's ``data`` field.
The logic to update the load balancer and the logic to update the ``data``
field of individual nodes are identical to those described in scenario *S4*.

S7: Handling ``NODE_CREATE`` and ``NODE_DELETE`` Actions
--------------------------------------------------------

When the action to be processed is a ``NODE_CREATE`` action, the new node has
been created and it is yet to be attached to the load balancer. The logic to
update the load balancer and the ``data`` field of the node in question is
identical to that described in scenario *S4*.

When the action to be processed is a ``NODE_DELETE`` action, the node is about
to be removed from the cluster. Before that, the policy is responsible for
detaching it from the load balancer. The logic to update the load balancer and
the ``data`` field of the node in question is identical to that described in
scenario *S1*.

diff --git a/doc/source/contributor/policies/region_v1.rst b/doc/source/contributor/policies/region_v1.rst
deleted file mode 100644
index e6f42c171..000000000
--- a/doc/source/contributor/policies/region_v1.rst
+++ /dev/null
@@ -1,232 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


============================
Region Placement Policy V1.0
============================

This policy is designed to make sure the nodes in a cluster are distributed
across multiple regions according to a specified scheme.

.. schemaspec::
   :package: senlin.policies.region_placement.RegionPlacementPolicy


Actions Handled
~~~~~~~~~~~~~~~

The policy is capable of handling the following actions:

- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value named
  ``count`` in its ``inputs``.

- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
  named ``count`` in its ``inputs``.

- ``CLUSTER_RESIZE``: an action that accepts a map as its input parameters in
  its ``inputs`` property, such as "``adjustment_type``", "``number``" etc.

- ``NODE_CREATE``: an action originating directly from an RPC request. This
  action has an associated node object that will be created.

The policy will be checked **BEFORE** any of the above-mentioned actions is
executed. Because the same policy implementation is used for covering both the
cases of scaling out a cluster and the cases of scaling in, the region
placement policy needs to parse the inputs differently in different scenarios.

The placement policy can be used independently, with or without other policies
attached to the same cluster. So the policy needs to understand whether there
are policy decisions from other policies (such as a
:doc:`scaling policy <scaling_v1>`).

When the policy is checked, it will first attempt to get the proper ``count``
input value, which may be an outcome from other policies or the inputs for
the action. For more details, check the scenarios described in the following
sections.
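
Before diving into the scenarios, the ``count`` derivation they all share can
be sketched as follows. This is purely illustrative, not Senlin's actual code;
the ``action`` object with ``data`` and ``inputs`` dictionaries is an
assumption made for the example.

.. code:: python

  def get_count(action, scaling_in):
      """Derive the number of nodes to handle for a placement decision."""
      key = 'deletion' if scaling_in else 'creation'
      # 1. Prefer a decision already produced by another policy.
      if key in action.data:
          return action.data[key].get('count', 1)
      # 2. Fall back to the action's own inputs; default to 1.
      return action.inputs.get('count', 1)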

Scenarios
~~~~~~~~~

S1: ``CLUSTER_SCALE_IN``
------------------------

The placement policy first checks if there are policy decisions from other
policies by looking into the ``deletion`` field of the action's ``data``
property. If there is such a field, the policy attempts to extract the
``count`` value from the ``deletion`` field. If the ``count`` value is not
found, 1 is assumed to be the default.

If, however, the policy fails to find the ``deletion`` field, it tries to find
a ``count`` field in the action's ``inputs`` property. If one is found, the
policy will use it; otherwise it will fall back to 1 as the default count.

After the policy has found out the ``count`` value (i.e. the number of nodes
to be deleted), it validates the list of region names provided to the policy.
If, for some reason, none of the provided names passes validation, the policy
check fails with the following data recorded in the action's ``data``
property:

::

  {
    "status": "ERROR",
    "reason": "No region is found usable."
  }

With the list of regions known to be good and the map of node distribution
specified in the policy spec, the Senlin engine continues to calculate a
placement plan that best matches the desired distribution.

If there are nodes that cannot be fit into the distribution plan, the policy
check fails with an error recorded in the action's ``data``, as shown below:

::

  {
    "status": "ERROR",
    "reason": "There is no feasible plan to handle all nodes."
  }

If there is a feasible plan to remove nodes from each region, the policy saves
the plan into the ``data`` property of the action as exemplified below:

::

  {
    "status": "OK",
    "deletion": {
      "count": 3,
      "regions": {
        "RegionOne": 2,
        "RegionTwo": 1
      }
    }
  }

This means that in total, 3 nodes should be removed from the cluster. Among
them, 2 nodes should be selected from region "``RegionOne``" and the
remaining one should be selected from region "``RegionTwo``".

**NOTE**: When there is a :doc:`deletion policy <deletion_v1>` attached to the
same cluster, that deletion policy will be evaluated after the region
placement policy, and it is expected to rebase its candidate selection on the
region distribution enforced here. For example, if the deletion policy is
tasked to select the oldest nodes for deletion, it will adapt its behavior to
select the oldest nodes from each region. The number of nodes to be chosen
from each region would be based on the output from this placement policy.


S2: ``CLUSTER_SCALE_OUT``
-------------------------

The placement policy first checks if there are policy decisions from other
policies by looking into the ``creation`` field of the action's ``data``
property. If there is such a field, the policy attempts to extract the
``count`` value from the ``creation`` field. If the ``count`` value is not
found, 1 is assumed to be the default.

If, however, the policy fails to find the ``creation`` field, it tries to find
a ``count`` field in the action's ``inputs`` property. If one is found, the
policy will use it; otherwise it will fall back to 1 as the default node
count.

After the policy has found out the ``count`` value (i.e. the number of nodes
to be created), it validates the list of region names provided to the policy
and extracts the current distribution of nodes among those regions.

If, for some reason, none of the provided names passes validation,
the policy check fails with the following data recorded in the action's
``data`` property:

::

  {
    "status": "ERROR",
    "reason": "No region is found usable."
  }

The logic of generating a distribution plan is almost identical to what has
been described in scenario *S1*, except for the output format. When there is
a feasible plan to accommodate all nodes, the plan is saved into the ``data``
property of the action as shown in the following example:

::

  {
    "status": "OK",
    "creation": {
      "count": 3,
      "regions": {
        "RegionOne": 1,
        "RegionTwo": 2
      }
    }
  }

This means that in total, 3 nodes should be created into the cluster. Among
them, one node should be created in region "``RegionOne``" and the other two
should be created in region "``RegionTwo``".

S3: ``CLUSTER_RESIZE``
----------------------

The placement policy first checks if there are policy decisions from other
policies by looking into the ``creation`` field of the action's ``data``
property. If there is such a field, the policy extracts the ``count`` value
from the ``creation`` field. If the ``creation`` field is not found, the policy
tries to find a ``deletion`` field in the action's ``data`` property. If there
is such a field, the policy extracts the ``count`` value from the ``deletion``
field. If neither ``creation`` nor ``deletion`` is found in the action's
``data`` property, the policy proceeds to parse the raw inputs of the action.

The output from the parser may indicate an invalid combination of input
values. If that is the case, the policy check fails with the action's
``data`` set to something like the following example:

::

  {
    "status": "ERROR",
    "reason": "<error message from the input parser>"
  }

If the parser successfully parsed the action's raw inputs, the policy tries
again to find either a ``creation`` or a ``deletion`` field in the action's
``data`` property. It will use the ``count`` value from the field found as the
number of nodes to be handled.

When the placement policy finds out the number of nodes to create (or delete),
it proceeds to calculate a distribution plan. If the action is about growing
the size of the cluster, the logic and the output format are the same as those
outlined in scenario *S2*. Otherwise, the logic and the output format are
identical to those described in scenario *S1*.


S4: ``NODE_CREATE``
-------------------

When handling a ``NODE_CREATE`` action, the region placement policy only needs
to deal with the node associated with the action. If, however, the node
references a profile which has a ``region_name`` specified in its spec, this
policy will avoid choosing a deployment region for the node. In other words,
the ``region_name`` specified in the profile spec takes precedence.

If the profile spec doesn't specify a region name, this placement policy will
proceed to evaluate the current region distribution and then calculate a
distribution plan. The logic and the output format are the same as those in
scenario *S2*, although the number of nodes to handle is one in this case.

diff --git a/doc/source/contributor/policies/scaling_v1.rst b/doc/source/contributor/policies/scaling_v1.rst
deleted file mode 100644
index 1ceae9050..000000000
--- a/doc/source/contributor/policies/scaling_v1.rst
+++ /dev/null
@@ -1,149 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


===================
Scaling Policy V1.0
===================

This policy is designed to help decide the detailed, quantitative parameters
used for scaling in/out a cluster. Senlin does provide a more sophisticated
API for resizing a cluster (i.e. ``cluster_resize``); however, in some use
cases we cannot assume the requesters have all the information needed to
determine each and every detailed parameter for resizing a cluster. There are
cases where the only thing a requester knows for sure is that a cluster should
be scaled out or scaled in. A scaling policy helps derive appropriate,
quantitative parameters for such a request.

Note that when calculating the target capacity of the cluster, Senlin only
considers the **ACTIVE** nodes.

.. schemaspec::
   :package: senlin.policies.scaling_policy.ScalingPolicy


Actions Handled
~~~~~~~~~~~~~~~

The policy is capable of handling the following actions:

- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value named
  ``count`` in its ``inputs``.

- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
  named ``count`` in its ``inputs``.

The policy will be checked **BEFORE** either of the above-mentioned actions is
executed. Because the same policy implementation is used for covering both the
case of scaling out a cluster and the case of scaling in, the scaling policy
exposes an "``event``" property to differentiate policy instances. This is
purely an implementation convenience.

The Senlin engine respects the user-provided "``count``" input parameter if it
is specified; otherwise, the policy computes a ``count`` value based on the
policy's ``adjustment`` property. In both cases, the policy will validate the
targeted capacity against the cluster's size constraints.

After validating the ``count`` value, the scaling policy proceeds to update
the ``data`` property of the action based on the validation result. If the
validation fails, the ``data`` property of the action will be updated to
something similar to the following example:

::

  {
    "status": "ERROR",
    "reason": "The target capacity (3) is less than cluster's min_size (2)."
  }

If the validation succeeds, the ``data`` property of the action is updated
accordingly (see Scenarios below).


Scenarios
~~~~~~~~~

S1: ``CLUSTER_SCALE_IN``
------------------------

The request may carry a "``count``" parameter in the action's ``inputs`` field.
The scaling policy respects the user input if provided, or else it will
calculate the number of nodes to be removed based on other properties of the
policy. In either case, the policy will check that the ``count`` value is a
positive integer (or can be converted to one).

In the next step, the policy checks whether the "``best_effort``" property has
been set to ``True`` (default is ``False``).
When the value is ``True``, the policy
will attempt to use the actual difference between the cluster's minimum size
and its current capacity rather than the ``count`` value if the latter is
greater than the former.

When the proper ``count`` value is generated and passes validation, the policy
updates the ``data`` property of the action to something like the
following example:

::

  {
    "status": "OK",
    "reason": "Scaling request validated.",
    "deletion": {
      "count": 2
    }
  }


S2: ``CLUSTER_SCALE_OUT``
-------------------------

The request may carry a "``count``" parameter in the action's ``inputs`` field.
The scaling policy respects the user input if provided, or else it will
calculate the number of nodes to be added based on other properties of the
policy. In either case, the policy will check that the ``count`` value is a
positive integer (or can be converted to one).

In the next step, the policy checks whether the "``best_effort``" property has
been set to ``True`` (default is ``False``). When the value is ``True``, the
policy will attempt to use the actual difference between the cluster's maximum
size and its current capacity rather than the ``count`` value if the latter is
greater than the former.

When the proper ``count`` value is generated and passes validation, the policy
updates the ``data`` property of the action to something like the
following example:

::

  {
    "status": "OK",
    "reason": "Scaling request validated.",
    "creation": {
      "count": 2
    }
  }


S3: Cross-region or Cross-AZ Scaling
------------------------------------

When scaling a cluster across multiple regions or multiple availability zones,
the scaling policy will be evaluated before the
:doc:`region placement policy <region_v1>` or the
:doc:`zone placement policy <zone_v1>` respectively. Based on
built-in priority settings, the checking of this scaling policy always happens
before the region placement policy or the zone placement policy.

The ``creation.count`` or ``deletion.count`` field is expected to be respected
by the region placement or zone placement policy. In other words, those
placement policies will base their calculation of node distribution on the
``creation.count`` or ``deletion.count`` value respectively.

diff --git a/doc/source/contributor/policies/zone_v1.rst b/doc/source/contributor/policies/zone_v1.rst
deleted file mode 100644
index 6df931f95..000000000
--- a/doc/source/contributor/policies/zone_v1.rst
+++ /dev/null
@@ -1,235 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


==========================
Zone Placement Policy V1.0
==========================

This policy is designed to make sure the nodes in a cluster are distributed
across multiple availability zones according to a specified scheme.

.. schemaspec::
   :package: senlin.policies.zone_placement.ZonePlacementPolicy


Actions Handled
~~~~~~~~~~~~~~~

The policy is capable of handling the following actions:

- ``CLUSTER_SCALE_IN``: an action that carries an optional integer value named
  ``count`` in its ``inputs``.

- ``CLUSTER_SCALE_OUT``: an action that carries an optional integer value
  named ``count`` in its ``inputs``.

- ``CLUSTER_RESIZE``: an action that accepts a map as its input parameters in
  its ``inputs`` property, such as "``adjustment_type``", "``number``" etc.

- ``NODE_CREATE``: an action originating directly from an RPC request. Such an
  action will have a node object associated with it, which becomes the one to
  be handled by this policy.

The policy will be checked **BEFORE** any of the above-mentioned actions is
executed. Because the same policy implementation is used for covering both the
cases of scaling out a cluster and the cases of scaling in, the zone placement
policy needs to parse the inputs differently in different scenarios.

The placement policy can be used independently, with or without other policies
attached to the same cluster. So the policy needs to understand whether there
are policy decisions from other policies (such as a
:doc:`scaling policy <scaling_v1>`).

When the policy is checked, it will first attempt to get the proper ``count``
input value, which may be an outcome from other policies or the inputs for
the action. For more details, check the scenarios described in the following
sections.


Scenarios
~~~~~~~~~

S1: ``CLUSTER_SCALE_IN``
------------------------

The placement policy first checks if there are policy decisions from other
policies by looking into the ``deletion`` field of the action's ``data``
property. If there is such a field, the policy attempts to extract the
``count`` value from the ``deletion`` field. If the ``count`` value is not
found, 1 is assumed to be the default.

If, however, the policy fails to find the ``deletion`` field, it tries to find
a ``count`` field in the action's ``inputs`` property. If one is found, the
policy will use it; otherwise it will fall back to 1 as the default count.

After the policy has found out the ``count`` value (i.e. the number of nodes
to be deleted), it validates the list of availability zone names provided to
the policy. If, for some reason, none of the provided names passes validation,
the policy check fails with the following data recorded in the action's
``data`` property:

::

  {
    "status": "ERROR",
    "reason": "No availability zone found available."
  }

With the list of availability zones known to be good and the map of node
distribution specified in the policy spec, the Senlin engine continues to
calculate a distribution plan that best matches the desired distribution.
If there are nodes that cannot be fit into the distribution plan, the policy
check fails with an error recorded in the action's ``data``, as shown below:

::

  {
    "status": "ERROR",
    "reason": "There is no feasible plan to handle all nodes."
  }

If there is a feasible plan to remove nodes from each availability zone, the
policy saves the plan into the ``data`` property of the action as exemplified
below:

::

  {
    "status": "OK",
    "deletion": {
      "count": 3,
      "zones": {
        "nova-1": 2,
        "nova-2": 1
      }
    }
  }

This means that in total, 3 nodes should be removed from the cluster.
Among them,
2 nodes should be selected from availability zone "``nova-1``" and the
remaining one should be selected from availability zone "``nova-2``".

**NOTE**: When there is a :doc:`deletion policy <deletion_v1>` attached to the
same cluster, that deletion policy will be evaluated after the zone placement
policy, and it is expected to rebase its candidate selection on the zone
distribution enforced here. For example, if the deletion policy is tasked to
select the oldest nodes for deletion, it will adapt its behavior to select
the oldest nodes from each availability zone. The number of nodes to be chosen
from each availability zone would be based on the output from this placement
policy.


S2: ``CLUSTER_SCALE_OUT``
-------------------------

The placement policy first checks if there are policy decisions from other
policies by looking into the ``creation`` field of the action's ``data``
property. If there is such a field, the policy attempts to extract the
``count`` value from the ``creation`` field. If the ``count`` value is not
found, 1 is assumed to be the default.

If, however, the policy fails to find the ``creation`` field, it tries to find
a ``count`` field in the action's ``inputs`` property. If one is found, the
policy will use it; otherwise it will fall back to 1 as the default node
count.

After the policy has found out the ``count`` value (i.e. the number of nodes
to be created), it validates the list of availability zone names provided to
the policy and extracts the current distribution of nodes among those
availability zones.

If, for some reason, none of the provided names passes validation,
the policy check fails with the following data recorded in the action's
``data`` property:

::

  {
    "status": "ERROR",
    "reason": "No availability zone found available."
  }

The logic of generating a distribution plan is almost identical to what has
been described in scenario *S1*, except for the output format. When there is
a feasible plan to accommodate all nodes, the plan is saved into the ``data``
property of the action as shown in the following example:

::

  {
    "status": "OK",
    "creation": {
      "count": 3,
      "zones": {
        "nova-1": 1,
        "nova-2": 2
      }
    }
  }

This means that in total, 3 nodes should be created into the cluster. Among
them, one node should be created in availability zone "``nova-1``" and the
other two should be created in availability zone "``nova-2``".

S3: ``CLUSTER_RESIZE``
----------------------

The placement policy first checks if there are policy decisions from other
policies by looking into the ``creation`` field of the action's ``data``
property. If there is such a field, the policy extracts the ``count`` value
from the ``creation`` field. If the ``creation`` field is not found, the policy
tries to find a ``deletion`` field in the action's ``data`` property. If there
is such a field, the policy extracts the ``count`` value from the ``deletion``
field. If neither ``creation`` nor ``deletion`` is found in the action's
``data`` property, the policy proceeds to parse the raw inputs of the action.

The output from the parser may indicate an invalid combination of input
values.
If that is the case, the policy check fails with the action's
``data`` set to something like the following example:

::

  {
    "status": "ERROR",
    "reason": "<error message from the input parser>"
  }

If the parser successfully parsed the action's raw inputs, the policy tries
again to find either a ``creation`` or a ``deletion`` field in the action's
``data`` property. It will use the ``count`` value from the field found as the
number of nodes to be handled.

When the placement policy finds out the number of nodes to create (or delete),
it proceeds to calculate a distribution plan. If the action is about growing
the size of the cluster, the logic and the output format are the same as those
outlined in scenario *S2*. Otherwise, the logic and the output format are
identical to those described in scenario *S1*.

S4: ``NODE_CREATE``
-------------------

When handling a ``NODE_CREATE`` action, the zone placement policy needs to
process the single node associated with the action, i.e. the node to be
created. If, however, the node references a profile whose spec contains an
``availability_zone`` property, it means the requesting user has a preferred
availability zone for the new node. In this case, the placement policy will
return directly without choosing an availability zone for the node.

If the profile spec doesn't have ``availability_zone`` specified, the
placement policy will proceed to evaluate the current zone distribution and
then calculate a distribution plan so that the new node will be deployed in a
proper availability zone. This logic and the output format are identical to
those in scenario *S2*.

diff --git a/doc/source/contributor/policy.rst b/doc/source/contributor/policy.rst
deleted file mode 100644
index 7385525cd..000000000
--- a/doc/source/contributor/policy.rst
+++ /dev/null
@@ -1,146 +0,0 @@
..
  Licensed under the Apache License, Version 2.0 (the "License"); you may
  not use this file except in compliance with the License. You may obtain
  a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  License for the specific language governing permissions and limitations
  under the License.


========
Policies
========

A policy is a wrapper around a collection of rules that will be
checked/enforced when Senlin performs some operations on the objects it
manages. The design goals of policy support in Senlin are flexibility and
customizability. We strive to make the policies flexible so that we can
accommodate diverse types of policies for various usage scenarios. We also
want to make policy type development an easier task for developers, so that
they can introduce new policies and/or customize existing ones for their
needs.

Policy Properties
~~~~~~~~~~~~~~~~~

A policy object has the following properties:

- ``id``: a string containing the globally unique ID for the object;
- ``name``: a string containing the name of the policy object;
- ``type``: a string containing the name of the policy type;
- ``spec``: a map containing the validated specification for the object;
- ``created_at``: timestamp of the object creation;
- ``updated_at``: timestamp of the last update to the object;
- ``data``: a map containing some private data for the policy object.

Creating a Policy
~~~~~~~~~~~~~~~~~

When the Senlin API receives a request to create a policy object, it first
checks if the JSON body contains a map named ``policy`` that has the ``name``
and ``spec`` keys and values associated with them. If any of these keys are
missing, the request will be treated as invalid and rejected.

After the preliminary request validation done at the Senlin API layer, the
Senlin engine will further check whether the specified policy type exists and
whether the specified ``spec`` can pass the validation logic in the policy
type implementation. If this phase of validation is successful, a policy
object will be created and saved into the database, and then a map containing
the details of the object will be returned to the requester. If any of these
validations fails, an error message will be returned to the requester instead.


Listing Policies
~~~~~~~~~~~~~~~~

Policy objects can be listed using the Senlin API. When querying the policy
objects, a user can specify the following query parameters, individually or
combined:

- ``filters``: a map containing key-value pairs that will be used for matching
  policy records. Records that fail to match these criteria will be filtered
  out. The following strings are valid keys:

  * ``name``: name of policies to list, can be a string or a list of strings;
  * ``type``: type name of policies, can be a string or a list of strings;

- ``limit``: a number that restricts the maximum number of records to be
  returned from the query. It is useful for displaying the records in pages
  where the page size can be specified as the limit.
- ``marker``: a string that represents the last seen UUID of policies in
  previous queries. This query will only return results appearing after the
  specified UUID. This is useful for displaying records in pages.
- ``sort``: a string to enforce sorting of the results. It accepts a list of
  known property names as sorting keys separated by commas. For each sorting
  key, you can append either ``:asc`` or ``:desc`` as its sorting order. By
  default, ``:asc`` is assumed to be the sorting direction.
- ``global_project``: a boolean indicating whether the policy listing should
  be done in a tenant-safe way. When this value is specified as False (the
  default), only policies from the current project that match the other
  criteria will be returned. When this value is specified as True, policies
  matching all other criteria will be returned, no matter in which project
  a policy was created. Only a user with admin privilege is permitted to do
  a global listing.

The Senlin API performs some basic checks on the data types and values of the
provided parameters and then passes the request to the Senlin engine. When
there are policy objects matching the query criteria, a list of policy objects
is returned to the requester. If there is no matching record, the result will
be an empty list.
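
As a concrete illustration, the query parameters above can be combined in a
single list request. The sketch below is hedged: the endpoint URL and token
are placeholders, and the exact response shape should be confirmed against the
API reference.

.. code:: python

  import requests

  params = {
      'name': 'my-policy',                   # filter by name
      'type': 'senlin.policy.deletion-1.1',  # filter by type
      'limit': 20,                           # page size
      'sort': 'name:asc,created_at:desc',    # multiple sort keys
  }
  resp = requests.get(
      'http://<senlin-endpoint>/v1/policies',
      headers={'X-Auth-Token': '<token>'},
      params=params,
  )
  policies = resp.json().get('policies', [])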
-
-
-Getting a Policy
-~~~~~~~~~~~~~~~~
-
-A user can provide one of the UUID, the name or the short ID of a policy
-object to the Senlin API ``policy_show`` to retrieve the details of a policy.
-
-If a policy object matching the criteria is found, Senlin API returns the
-object details in a map; if more than one object is found, Senlin API returns
-an error message telling users that there are multiple choices; if no object
-is found matching the criteria, a different error message will be returned to
-the requester.
-
-
-Updating a Policy
-~~~~~~~~~~~~~~~~~
-
-After a policy is created, a user can send requests to the Senlin API for
-changing some of its properties. To avoid potential state conflicts inside
-the Senlin engine, we currently don't allow changes to the ``spec`` property
-of a policy. However, changing the ``name`` property is permitted.
-
-When validating the requester-provided parameters, Senlin API will check if
-the values are of valid data types and whether the values fall in allowed
-ranges. After this validation, the request is forwarded to the Senlin engine
-for processing.
-
-The Senlin engine will try to find the policy using the specified policy
-identity as the UUID, the name or a short ID of the policy object. When no
-matching object is found or more than one object is found, an error message
-is returned to the user. Otherwise, the engine updates the object property
-and returns the object details in a map.
-
-
-Deleting a Policy
-~~~~~~~~~~~~~~~~~
-
-A user can specify the UUID, the name or the short ID of a policy object when
-sending a ``policy_delete`` request to the Senlin API.
-
-The Senlin engine will try to find the matching policy object using the
-specified identity as the UUID, the name or a short ID of the policy object.
-When no matching object is found or more than one object is found, an error
-message is returned. Otherwise, the API returns a 204 status to the requester
-indicating that the deletion was successful.
-
-To prevent deletion of policies that are still in use by any clusters, the
-Senlin engine checks whether any bindings exist between the specified policy
-and a cluster. An error message will be returned to the requester if such a
-binding is found.
diff --git a/doc/source/contributor/policy_type.rst b/doc/source/contributor/policy_type.rst
deleted file mode 100644
index 1aad11115..000000000
--- a/doc/source/contributor/policy_type.rst
+++ /dev/null
@@ -1,293 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-============
-Policy Types
-============
-
-A :doc:`policy ` is a set of rules that are checked and enforced. The
-checking can be done before or after an action's execution, or both. Policies
-are of different policy types, each of which is designed to make sure that a
-cluster's behavior follows certain patterns or complies with certain
-restrictions.
-
-When released, Senlin comes with some built-in policy types to meet the
-requirements found in some typical use cases.
However, the distributors or
-the users can always augment their collection of policy types by implementing
-their own.
-
-Policy type implementations are managed as Senlin plugins. The plan is to
-have the Senlin engine support dynamic loading of plugins from user-specified
-modules and classes. Currently, this can be achieved by adding new
-``senlin.policies`` entries in the ``entry_points`` section in the
-``setup.cfg`` file, followed by a reinstall of the Senlin service, i.e.
-running the ``sudo pip install`` command.
-
-
-The Base Class ``Policy``
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The base class ``Policy`` provides common logic for the following operations:
-
-- The initialization of the ``spec_data`` property, based on the
-  ``spec_schema`` definition and the ``spec`` input.
-- The serialization and deserialization of a policy object into/from the
-  database.
-- The serialization and deserialization of a policy object into/from a dict.
-- The default validation operation for the ``spec_data`` property.
-- Default implementations for the following methods which are to be
-  overridden by a policy type implementation:
-
-  * ``attach(cluster_id, action)``: a method that will be invoked when a
-    policy object of this type is attached to a cluster.
-  * ``detach(cluster_id, action)``: a method that will be invoked when a
-    policy object of this type is detached from a cluster.
-  * ``pre_op(cluster_id, action)``: a method that will be invoked before an
-    action is executed.
-  * ``post_op(cluster_id, action)``: a method that will be invoked after an
-    action is executed.
-
-
-The ``VERSIONS`` Property
--------------------------
-
-Each policy type class has a ``VERSIONS`` class property that documents the
-changes to the policy type. This information is returned when users request
-to list all policy types supported.
-
-The ``VERSIONS`` property is a dict with version numbers as keys. For each
-specific version, the value is a list of support status changes made to the
-policy type. Each change record contains a ``status`` key whose value is one
-of ``EXPERIMENTAL``, ``SUPPORTED``, ``DEPRECATED`` or ``UNSUPPORTED``, and a
-``since`` key whose value is of the format ``yyyy.mm`` where ``yyyy`` and
-``mm`` are the year and month of the release that bears the change to the
-support status. For example, the following record indicates that the
-specific policy type was introduced in April, 2016 (i.e. version 1.0 release
-of Senlin) as an experimental feature; later, in October, 2016 (i.e. version
-2.0 release of Senlin) it graduated into a mature feature supported by the
-developer team.
-
-.. code:: python
-
-    VERSIONS = {
-        '1.0': [
-            {
-                "status": "EXPERIMENTAL",
-                "since": "2016.04"
-            },
-            {
-                "status": "SUPPORTED",
-                "since": "2016.10"
-            }
-        ]
-    }
-
-
-Providing New Policy Types
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Adding new policy type implementations is an easy task with only a few steps
-to follow.
-
-
-Develop A New Policy Type
--------------------------
-
-The first step for adding a new policy type is to create a new file
-containing a subclass of ``Policy``. Then you will define the spec schema for
-the new policy type in a Python dictionary named ``spec_schema``.
-
-
-Defining Spec Schema
---------------------
-
-Each key in this dictionary represents a property name; its value is an
-object of one of the schema types listed below:
-
-- ``String``: A string property.
-- ``Boolean``: A boolean property.
-- ``Integer``: An integer property.
-- ``List``: A property containing a list of values.
-- ``Map``: A property containing a map of key-value pairs.
-
-For example:
-
-.. code:: python
-
-    spec_schema = {
-        'destroy_after_delete': schema.Boolean(
-            'Boolean indicating whether object will be destroyed after deletion.',
-            default=True,
-        ),
-        ...
-    }
-
-
-If a property value will be a list, you can further define the type of items
-the list can accept. For example:
-
-.. code:: python
-
-    spec_schema = {
-        'criteria': schema.List(
-            'Criteria for object selection that will be evaluated in order.',
-            schema=schema.String('Name of a criterion'),
-        ),
-        ...
-    }
-
-If a property value will be a map of key-value pairs, you can define the
-schema of the map, which is another Python dictionary containing definitions
-of properties. For example:
-
-.. code:: python
-
-    spec_schema = {
-        'strategy': schema.Map(
-            'Strategy for dealing with servers with different states.',
-            schema={
-                'inactive': schema.String('Action for inactive servers.'),
-                'deleted': schema.String('Action for deleted servers.'),
-                'suspended': schema.String('Action for suspended servers.'),
-            },
-        ),
-        ...
-    }
-
-When creating a schema type object, you can specify the following keyword
-arguments to gain better control of the property:
-
-- ``default``: a default value of the expected data type;
-- ``required``: a boolean value indicating whether the property may be
-  omitted when validating the policy spec;
-- ``constraints``: a list of ``Constraint`` objects each of which defines a
-  constraint to be checked. Senlin currently only supports the
-  ``AllowedValues`` constraint.
-
-
-Applicable Profile Types
-------------------------
-
-Not all policy types can be used on all profile types. For example, a policy
-about load-balancing is only meaningful for objects that can handle
-workloads, or more specifically, objects that expose a service access point
-on an IP port.
-
-You can define which profile types your new policy type can handle by
-specifying the ``PROFILE_TYPE`` property of your policy type class. The value
-of ``PROFILE_TYPE`` is a list of profile type names. If a policy type is
-designed to handle all profile types, you can specify a single entry ``ANY``
-as the value. See :doc:`profile types ` for profile type related
-operations.
-
-
-Policy Targets
---------------
-
-A policy type is usually defined to handle certain operations. The rules
-embedded in the implementation may need to be checked before the execution of
-an :doc:`action ` or they may need to be enforced after the execution
-of the action. When an action is about to be executed or an action has
-finished execution, the Senlin engine will check if any policy objects
-attached to a cluster are interested in the action. If the answer is yes, the
-engine will invoke the ``pre_op`` function or the ``post_op`` function
-respectively, thus giving the policy object a chance to adjust the action's
-behavior.
-
-You can define a ``TARGET`` property for the policy type implementation to
-indicate the actions your policy type wants to subscribe to. The ``TARGET``
-property is a list of (``WHEN``, ``ACTION``) tuples. For example, the
-following property definition indicates that the policy type is interested in
-the actions ``CLUSTER_SCALE_IN`` and ``CLUSTER_DEL_NODES``. The policy type
-wants to be consulted *before* these actions are performed.
-
-.. code:: python
-
-    class MyPolicyType(Policy):
-        ...
-        TARGET = [
-            (BEFORE, consts.CLUSTER_SCALE_IN),
-            (BEFORE, consts.CLUSTER_DEL_NODES),
-        ]
-        ...
-
-When the corresponding actions are about to be executed, the ``pre_op``
-function of this policy object will be invoked.
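-
-Putting these pieces together, the skeleton of a custom policy type might
-look like the sketch below. This is illustrative only: the module paths, the
-location of the ``BEFORE`` constant and the ``spec_data`` access pattern are
-assumptions following the conventions described above, not code copied from
-a particular Senlin release.
-
-.. code:: python
-
-    from senlin.common import consts
-    from senlin.common import schema
-    from senlin.policies import base
-
-
-    class MyPolicyType(base.Policy):
-        """An illustrative policy type checked before scale-in actions."""
-
-        VERSIONS = {'1.0': [{'status': 'EXPERIMENTAL', 'since': '2016.10'}]}
-
-        # This example is applicable to any profile type.
-        PROFILE_TYPE = ['ANY']
-
-        # Consulted before scale-in and node-deletion actions.
-        TARGET = [
-            (consts.BEFORE, consts.CLUSTER_SCALE_IN),
-            (consts.BEFORE, consts.CLUSTER_DEL_NODES),
-        ]
-
-        spec_schema = {
-            'max_delete': schema.Integer(
-                'Maximum number of nodes to delete in one action.',
-                default=1,
-            ),
-        }
-
-        def pre_op(self, cluster_id, action):
-            # Leave a decision in the action's ``data`` property for the
-            # engine and for downstream policies to consume.
-            count = self.spec_data['max_delete']
-            action.data.setdefault('deletion', {'count': count})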
-
-
-Passing Data Between Policies
------------------------------
-
-Each policy type may decide to send some data as additional inputs or
-constraints for the action to consume. This is done by modifying the ``data``
-property of an ``Action`` object (see :doc:`action `).
-
-A policy type may also want to check whether other policy objects have left
-policy decisions in the ``data`` property of an action object.
-
-Senlin allows for more than one policy to be attached to the same cluster.
-Each policy, when enabled, is supposed to check a specific subset of cluster
-actions. In other words, different policies may get checked before/after the
-engine executes a specific cluster action. This design effectively forms a
-chain of policies for checking. The decisions (outcomes) from one policy
-sometimes impact other policies that are checked later.
-
-To help other developers understand how a specific policy type is designed
-to work in concert with others, we require that all policy type
-implementations shipped with Senlin be accompanied by documentation about:
-
-* the ``action.data`` items the policy type will consume, including how these
-  data will impact the policy decisions.
-* the ``action.data`` items the policy type will produce, thus consumable by
-  any policies downstream.
-
-For built-in policy types, the protocol is documented below:
-
-.. toctree::
-   :maxdepth: 1
-
-   policies/affinity_v1
-   policies/deletion_v1
-   policies/load_balance_v1
-   policies/region_v1
-   policies/scaling_v1
-   policies/zone_v1
-
-
-Registering The New Policy Type
--------------------------------
-
-For the Senlin service to be aware of, and thus make use of, the new policy
-type you have just developed, you will need to register it with the Senlin
-service. Currently, this is done through the manual process shown below. In
-the future, Senlin will provide dynamic loading support for policy type
-plugins.
-
-To register a new policy type, you will add a line to the ``setup.cfg`` file
-that can be found in the root directory of the Senlin code base. For example:
-
-::
-
-    [entry_points]
-    senlin.policies =
-        ScalingPolicy = senlin.policies.scaling_policy:ScalingPolicy
-        MyCoolPolicy = <path-to-your-module>:<your-class-name>
-
-Finally, save that file and do a reinstall of the Senlin service, followed
-by a restart of the ``senlin-engine`` process.
-
-::
-
-    $ sudo pip install -e .
-
-
-Now, when you do a :command:`openstack cluster policy type list`, you will
-see your policy type listed along with other existing policy types.
diff --git a/doc/source/contributor/profile.rst b/doc/source/contributor/profile.rst
deleted file mode 100644
index 2719e98d8..000000000
--- a/doc/source/contributor/profile.rst
+++ /dev/null
@@ -1,149 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-========
-Profiles
-========
-
-A profile is an object instantiated from a "profile type" and it is used as
-the specification for creating a physical object to be managed by Senlin.
The
-"physical" adjective here is used to differentiate such an object from its
-counterpart, the "logical" object, which is referred to as a node in Senlin.
-
-As the specification for physical object creation, a profile contains almost
-every piece of information needed for the underlying driver to create an
-object. After a physical object is created, its UUID will be assigned to the
-``physical_id`` property of a node as a reference. When a physical object is
-deleted, the ``physical_id`` property will be set to ``None``.
-
-Although not required, a profile may reference the node object's properties
-when creating a physical object. For example, a profile may use the node's
-``index`` property value for generating a name for the object; a profile may
-customize an object's property based on the ``role`` property value of a
-node. How a profile makes use of the properties of a node is up to the
-profile type author and the specific use case.
-
-
-Profile Properties
-~~~~~~~~~~~~~~~~~~
-
-A profile object has the following properties:
-
-- ``id``: a globally unique ID assigned to the object after creation;
-- ``name``: a string representation of the profile name;
-- ``type``: a string referencing the profile type used;
-- ``context``: a map of key-value pairs that contains credentials and/or
-  parameters for authentication with an identity service. When a profile is
-  about to create an object, it will use data stored here to establish a
-  connection to a service;
-- ``spec``: a map of key-value pairs that contains the specification for
-  object creation. The content of this property is dictated by the
-  corresponding profile type.
-- ``metadata``: a map of key-value pairs associated with the profile;
-- ``created_at``: the timestamp when the profile was created;
-- ``updated_at``: the timestamp when the profile was last updated;
-
-The ``spec`` property is the most important property for a profile. It is
-immutable, i.e. the only way to "change" the ``spec`` property is to create
-a new profile. By restricting changes to this property, Senlin can do a
-better job in managing the object configurations.
-
-
-Creating A Profile
-~~~~~~~~~~~~~~~~~~
-
-When creating a profile using the ``profile_create`` API, a user must provide
-the ``name`` and ``spec`` parameters. All other parameters are optional.
-
-The provided ``spec`` map will be validated using the validation logic
-provided by the corresponding profile type. If the validation succeeds, the
-profile will be created and stored into the database. The Senlin engine
-returns the details of the profile as a dict back to the Senlin API and
-eventually to the requesting user. If the validation fails, the Senlin
-engine returns an error message describing the reason for the failure.
-
-
-Listing Profiles
-~~~~~~~~~~~~~~~~
-
-Senlin provides an API for listing all profiles known to the Senlin engine.
-When querying the profiles, users can provide any of the following
-parameters:
-
-- ``filters``: a map of key-value pairs to filter profiles, where each key
-  can be one of the following words and the value(s) are for the Senlin
-  engine to match against all profiles.
-
-  - ``name``: profile name for matching;
-  - ``type``: profile type for matching;
-  - ``metadata``: a string for matching profile metadata.
-
-- ``limit``: an integer that specifies the maximum number of records to be
-  returned from the API call;
-- ``marker``: a string specifying the UUID of the last seen record; only
-  those records that appear after the given value will be returned;
-- ``sort``: A string to enforce sorting of the results. It accepts a list of
-  known property names of a profile as sorting keys separated by commas. Each
-  sorting key can optionally have either ``:asc`` or ``:desc`` appended to
-  the key for controlling the sorting direction.
-- ``global_project``: A boolean indicating whether profile listing should be
-  done in a tenant-safe way. When this value is specified as False (the
-  default), only profiles from the current project that match the other
-  criteria will be returned. When this value is specified as True, profiles
-  matching all other criteria will be returned, no matter in which project a
-  profile was created. Only a user with admin privilege is permitted to do a
-  global listing.
-
-If there are profiles matching the query criteria, Senlin API returns a list
-named ``profiles`` where each entry is a JSON map containing details about a
-profile object. Otherwise, an empty list or an error message will be returned
-depending on whether the query was well formed.
-
-
-Getting A Profile
-~~~~~~~~~~~~~~~~~
-
-A user can provide one of the following values in an attempt to retrieve the
-details of a specific profile.
-
-- Profile UUID: The query is performed strictly based on the UUID value
-  given. This is the most precise query supported in Senlin.
-- Profile name: Senlin allows multiple profiles to have the same name. It is
-  the user's responsibility to avoid name conflicts if needed. If the
-  provided name cannot uniquely identify a profile, the Senlin engine will
-  return an error telling users that multiple profiles were found matching
-  this name.
-- Short ID: Considering that a UUID is a long string that is not convenient
-  to input, Senlin supports a short version of UUIDs for queries. The Senlin
-  engine will use the provided string as a prefix to attempt a matching in
-  the database. When the "ID" is long enough to be unique, the details of
-  the matching profile are returned, or else Senlin will return an error
-  message indicating that multiple profiles were found matching the
-  specified short ID.
-
-
-Updating A Profile
-~~~~~~~~~~~~~~~~~~
-
-Once a profile object is created, a user can request its properties to be
-updated. Updates to the ``name`` or ``metadata`` properties are applied on
-the specified profile object directly. Changing the ``spec`` property of a
-profile object is not permitted.
-
-
-Deleting A Profile
-~~~~~~~~~~~~~~~~~~
-
-A user can provide one of profile UUID, profile name or a short ID of a
-profile when requesting a profile object to be deleted. The Senlin engine
-will check if there are still any clusters or nodes using the specific
-profile. Since a profile in use cannot be deleted, if any such clusters or
-nodes are found, an error message will be returned to the user.
diff --git a/doc/source/contributor/profile_type.rst b/doc/source/contributor/profile_type.rst
deleted file mode 100644
index a94be0271..000000000
--- a/doc/source/contributor/profile_type.rst
+++ /dev/null
@@ -1,271 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License.
You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-=============
-Profile Types
-=============
-
-In Senlin, each node is associated with a physical object created by
-instantiating a :doc:`profile `. Profiles themselves are objects
-instantiated from "profile types". In other words, a profile type provides
-the specification for creating profiles while a profile can be used to create
-multiple homogeneous objects.
-
-Profile type implementations are managed as plugins. Users can use the
-built-in profile types directly, and they can provide their own
-implementations of new profile types. The plan is to have the Senlin engine
-support dynamic loading of plugins. Currently, this can be done by adding new
-``senlin.profiles`` entries in the ``entry_points`` section in the
-``setup.cfg`` file, followed by a reinstall (i.e. ``pip install``) operation.
-
-
-The Base Class ``Profile``
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The base class ``Profile`` provides common logic for the following
-operations:
-
-- the initialization of the ``spec_data`` based on the ``spec_schema``
-  property and the ``spec`` input.
-- the initialization of a basic request context using the Senlin service
-  credentials.
-- the serialization and deserialization of a profile object into/from the
-  database.
-- the validation of data provided through the ``spec`` field of the profile;
-- the northbound APIs that are provided as class methods, including:
-
-  * ``create_object()``: create an object using logic from the profile type
-    implementation, with data from the profile object as inputs;
-  * ``delete_object()``: delete an object using the profile type
-    implementation;
-  * ``update_object()``: update an object by invoking the operation provided
-    by a profile type implementation, with data from a different profile
-    object as inputs;
-  * ``get_details()``: retrieve object details into a dictionary by invoking
-    the corresponding method provided by a profile type implementation;
-  * ``join_cluster()``: a hook API that will be invoked when an object is
-    made a member of a cluster; the purpose is to give the profile type
-    implementation a chance to make changes to the object accordingly;
-  * ``leave_cluster()``: a hook API that will be invoked when an object is
-    removed from its current cluster; the purpose is to give the profile
-    type implementation a chance to make changes to the object accordingly;
-  * ``recover_object()``: recover an object using the operation given by the
-    inputs to the profile object. If no operation is provided, ``recreate``
-    is used by default, which first deletes and then re-creates the object.
-
-
-Abstract Methods
-----------------
-
-In addition to the logic above, the base class ``Profile`` also defines some
-abstract methods for a profile type implementation to override. When invoked,
-these methods by default return ``NotImplemented``, a special value
-indicating that the method is not implemented.
-
-- ``do_create(obj)``: an object creation method for a profile type
-  implementation to override;
-- ``do_delete(obj)``: an object deletion method for a profile type
-  implementation to override;
-- ``do_update(obj, new_profile)``: an object update method for a profile
-  type implementation to override;
-- ``do_check(obj)``: a method that is meant to do a health check over the
-  provided object;
-- ``do_get_details(obj)``: a method that can be overridden so that the
-  caller can get a dict that contains properties specific to the object;
-- ``do_join(obj)``: a method for an implementation to override so that
-  profile type specific changes can be made to the object when the object
-  joins a cluster.
-- ``do_leave(obj)``: a method for an implementation to override so that
-  profile type specific changes can be made to the object when the object
-  leaves its cluster.
-- ``do_recover(obj)``: an object recovery method for a profile type
-  implementation to override. The Nova server profile, for example,
-  overrides the recover operation with ``REBUILD``.
-
-
-The ``VERSIONS`` Property
--------------------------
-
-Each profile type class has a ``VERSIONS`` class property that documents the
-changes to the profile type. This information is returned when users request
-to list all profile types supported.
-
-The ``VERSIONS`` property is a dict with version numbers as keys. For each
-specific version, the value is a list of support status changes made to the
-profile type. Each change record contains a ``status`` key whose value is
-one of ``EXPERIMENTAL``, ``SUPPORTED``, ``DEPRECATED`` or ``UNSUPPORTED``,
-and a ``since`` key whose value is of the format ``yyyy.mm`` where ``yyyy``
-and ``mm`` are the year and month of the release that bears the change to
-the support status. For example, the following record indicates that the
-specific profile type was introduced in April, 2016 (i.e. version 1.0
-release of Senlin) as an experimental feature; later, in October, 2016 (i.e.
-version 2.0 release of Senlin) it graduated into a mature feature supported
-by the developer team.
-
-.. code:: python
-
-    VERSIONS = {
-        '1.0': [
-            {
-                "status": "EXPERIMENTAL",
-                "since": "2016.04"
-            },
-            {
-                "status": "SUPPORTED",
-                "since": "2016.10"
-            }
-        ]
-    }
-
-
-The ``context`` Property
-------------------------
-
-In the ``Profile`` class, there is a special property named ``context``. This
-is the data structure containing all the information needed when the profile
-type implementation wants to authenticate with a cloud platform. As described
-in :doc:`authorization `, Senlin makes use of the trust mechanism
-provided by the OpenStack Keystone service.
-
-The dictionary in this ``context`` property by default contains the
-credentials for the Senlin service account. Using the trust built between
-the requesting user and the service account, a profile type implementation
-can authenticate itself with the backend Keystone service and then interact
-with supporting services such as Nova, Heat, etc.
-
-All profile type implementations can include a ``context`` key in their
-spec; the default value is an empty dictionary. A user may customize the
-contents when creating a profile object by specifying a ``region_name``, for
-example, to enable a multi-region cluster deployment. They could even
-specify a different ``auth_url`` so that a cluster can be built across
-OpenStack clouds.
-
-
-Providing New Profile Types
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When released, Senlin provides some built-in profile types.
However,
-developing new profile types for Senlin is not a difficult task.
-
-
-Develop a New Profile Type
---------------------------
-
-The first step is to create a new file containing a subclass of ``Profile``.
-Then you will define the spec schema for the new profile type, which is a
-Python dictionary named ``spec_schema``, with property names as keys. For
-each property, you will specify its value to be an object of one of the
-schema types listed below:
-
-- ``String``: A string property.
-- ``Boolean``: A boolean property.
-- ``Integer``: An integer property.
-- ``List``: A property containing a list of values.
-- ``Map``: A property containing a map of key-value pairs.
-
-For example:
-
-.. code:: python
-
-    spec_schema = {
-        'name': schema.String('name of object'),
-        'capacity': schema.Integer('capacity of object', default=10),
-        'shared': schema.Boolean('whether object is shared', default=True)
-    }
-
-If a profile property is a ``List``, you can further define the type of
-elements in the list, which can be a ``String``, a ``Boolean``, an
-``Integer`` or a ``Map``. For example:
-
-.. code:: python
-
-    spec_schema = {
-        ...
-        'addresses': schema.List(
-            'address of object on each network',
-            schema=schema.String('address on a network')
-        ),
-        ...
-    }
-
-If a profile property is a ``Map``, you can further define the "schema" of
-that map, which itself is another Python dictionary containing property
-definitions. For example:
-
-.. code:: python
-
-    spec_schema = {
-        ...
-        'dimension': schema.Map(
-            'dimension of object',
-            schema={
-                'length': schema.Integer('length of object'),
-                'width': schema.Integer('width of object')
-            }
-        )
-        ...
-    }
-
-
-By default, a property is not required. If a property has to be provided,
-you can specify ``required=True`` in the property type constructor. For
-example:
-
-.. code:: python
-
-    spec_schema = {
-        ...
-        'name_length': schema.Integer('length of name', required=True)
-        ...
-    }
-
-A property can have a default value that is used when no value is specified.
-If a property has a default value, you don't need to mark it as required.
-For example:
-
-.. code:: python
-
-    spec_schema = {
-        ...
-        'min_size': schema.Integer('minimum size of object', default=0)
-        ...
-    }
-
-After the properties are defined, you can continue to work on overriding the
-abstract methods inherited from the base ``Profile`` type as appropriate.
-
-
-Registering a New Profile Type
-------------------------------
-
-For Senlin to make use of the new profile type you have just developed, you
-will need to register it with the Senlin service. Currently, this is done
-through a manual process. In the future, Senlin will provide dynamic loading
-support for profile type plugins.
-
-To register a new profile type, you will add a line to the ``setup.cfg`` file
-that can be found in the root directory of the Senlin code base. For example:
-
-::
-
-    [entry_points]
-    senlin.profiles =
-        os.heat.stack-1.0 = senlin.profiles.os.heat.stack:StackProfile
-        os.nova.server-1.0 = senlin.profiles.os.nova.server:ServerProfile
-        my.cool.profile-1.0 = <path-to-your-module>:<your-class-name>
-
-Finally, save that file and do a reinstall of the Senlin service, followed by
-a restart of the ``senlin-engine`` process.
-
-::
-
-    $ sudo pip install -e .
-
-Now, when you do a :command:`openstack cluster profile type list`, you will
-see your profile type listed along with other existing profile types.
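-
-As a quick recap, a bare-bones profile type might look like the sketch below.
-This is illustrative only: the module paths and helper names are assumptions
-following the conventions described above, and the driver interactions are
-stubbed out rather than taken from a real backend.
-
-.. code:: python
-
-    from senlin.common import schema
-    from senlin.profiles import base
-
-
-    class MyCoolProfile(base.Profile):
-        """An illustrative profile type managing a fictional object."""
-
-        VERSIONS = {'1.0': [{'status': 'EXPERIMENTAL', 'since': '2016.10'}]}
-
-        spec_schema = {
-            'name': schema.String('Name of the object.', required=True),
-            'capacity': schema.Integer('Capacity of the object.', default=10),
-        }
-
-        def do_create(self, obj):
-            # A real implementation would call a backend driver here, using
-            # the validated spec data for the object configuration, and
-            # return the UUID of the physical object created.
-            physical_id = 'fake-id-for-%s' % obj.name
-            return physical_id
-
-        def do_delete(self, obj):
-            # A real implementation would delete the physical object
-            # referenced by obj.physical_id via a backend driver.
-            return True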
diff --git a/doc/source/contributor/receiver.rst b/doc/source/contributor/receiver.rst
deleted file mode 100644
index 99116451f..000000000
--- a/doc/source/contributor/receiver.rst
+++ /dev/null
@@ -1,226 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-========
-Receiver
-========
-
-Concept
-~~~~~~~
-
-A :term:`Receiver` is an abstract resource created in the Senlin engine to
-handle operation automation. You can create a receiver to trigger a specific
-action on a cluster on behalf of a user when some external alarms or events
-are fired.
-
-A receiver can be of different types. The ``type`` of a receiver is specified
-when the receiver is created. Currently, two receiver types are supported:
-``webhook`` and ``message``. For a ``webhook`` receiver, a :term:`Webhook`
-URI is generated for users or programs to trigger a cluster action by
-sending an HTTP POST request. For a ``message`` receiver, a Zaqar queue is
-created for users or programs to trigger a cluster action by sending a
-message.
-
-A receiver encapsulates the information needed for triggering an action.
-This information may include:
-
-* ``actor``: the credential of a user on whose behalf the action will be
-  triggered. This is usually the user who created the receiver, but it can
-  be any other valid user explicitly specified when the receiver is created.
-* ``cluster_id``: the ID of the targeted cluster. It is required only for
-  ``webhook`` receivers.
-* ``action``: the name of an action that is applicable on a cluster. It is
-  required only for ``webhook`` receivers.
-* ``params``: a dictionary feeding argument values (if any) to the action.
-  It is optional for all types of receivers.
-
-In the long term, Senlin may support user-defined actions where ``action``
-will be interpreted as the UUID or name of a user-defined action.
-
-
-Creating a Receiver
-~~~~~~~~~~~~~~~~~~~
-
-Creating a webhook receiver
----------------------------
-
-When a user requests to create a webhook receiver by invoking the
-:program:`openstack` command, the request comes with at least three
-parameters: the receiver type which should be ``webhook``, the targeted
-cluster and the intended action to invoke when the receiver is triggered.
-Optionally, the user can provide some additional parameters to use and/or
-the credentials of a different user.
-
-When the Senlin API service receives the request, it does three things:
-
-* Validating the request and rejecting it if any one of the following
-  conditions is met:
-
-  - the receiver type specified is not supported;
-  - the targeted cluster cannot be found;
-  - the targeted cluster is not owned by the requester and the requester
-    does not have an "``admin``" role in the project;
-  - the provided action is not applicable on a cluster.
-
-* Creating a receiver object that contains all necessary information that
-  will be used to trigger the specified action on the specified cluster.
-
-* Creating a "channel" which contains information users can use to trigger
-  a cluster action.
For the ``webhook`` receiver, this is a URL stored in
-  the ``alarm_url`` field and it looks like::
-
-    http://{host:port}/v1/webhooks/{webhook_id}/trigger?V=2
-
-  **NOTE**: The ``V=2`` above is used to encode the current webhook
-  triggering protocol. When the protocol changes in the future, the value
-  will be changed accordingly.
-
-Finally, the Senlin engine returns a dictionary containing the properties of
-the receiver object.
-
-Creating a message receiver
----------------------------
-
-When a user requests to create a message receiver by invoking the
-:program:`openstack` command, the receiver type ``message`` is the only
-parameter that needs to be specified.
-
-When the Senlin API service receives the request, it does the following
-things:
-
-* Validating the request and rejecting it if the receiver type specified is
-  not supported;
-
-* Creating a receiver object whose ``cluster_id`` and ``action`` properties
-  are ``None``;
-
-* Creating a "channel" which contains information users can use to trigger
-  a cluster action. For a ``message`` receiver, the following steps are
-  followed:
-
-  - Creating a Zaqar queue whose name has the ``senlin-receiver-`` prefix.
-  - Building a trust between the requester (trustor) and the Zaqar trustee
-    user (trustee) if this trust relationship has not been created yet.
-    The ``trust_id`` will be used to create message subscriptions in the
-    next step.
-  - Creating a Zaqar subscription targeting the queue just created and
-    specifying the HTTP subscriber to the following URL::
-
-      http://{host:port}/v1/receivers/{receiver_id}/notify
-
-  - Storing the name of the queue into the ``queue_name`` field of the
-    receiver's channel.
-
-Finally, the Senlin engine returns a dictionary containing the properties of
-the receiver object.
-
-
-Triggering a Receiver
-~~~~~~~~~~~~~~~~~~~~~
-
-Different types of receivers are triggered in different ways. For example, a
-``webhook`` receiver is triggered via the ``alarm_url`` channel; a message
-queue receiver can be triggered via messages delivered in a shared queue.
-
-
-Triggering a Webhook
---------------------
-
-When triggering a webhook, a user or a program sends a ``POST`` request to
-the receiver's ``alarm_url`` channel, which is a specially encoded URL. This
-request is first processed by the ``webhook`` middleware before arriving at
-the Senlin API service.
-
-The ``webhook`` middleware checks this request and parses the format of the
-request URL. The middleware attempts to find the receiver record in the
-Senlin database to see whether the named receiver exists. If the receiver is
-found, it then tries to load the saved credentials. An error code 404 will
-be returned if the receiver is not found.
-
-After having retrieved the credentials, the middleware will proceed to get a
-Keystone token using the credentials combined with the Senlin service
-account info. Using this token, the triggering request can proceed along the
-pipeline of middlewares. An exception will be thrown if the authentication
-operation fails.
-
-When the Senlin engine service receives the webhook triggering request, it
-creates an action based on the information stored in the receiver object.
-The newly created action is then dispatched and scheduled by a scheduler to
-perform the expected operation.
-
-Triggering a Message Receiver
------------------------------
-
-When triggering a message receiver, a user or a program needs to send
-message(s) to the Zaqar queue whose name can be found in the channel data of
-the receiver.
The Zaqar service
-will then notify the Senlin service of the message(s) by sending an HTTP
-POST request to the Senlin subscriber URL.
-Note: this POST request is sent using the Zaqar trustee user credential
-and the ``trust_id`` defined in the subscriber. Therefore, Senlin will
-recognize the requester as the receiver owner rather than the Zaqar service
-user.
-
-The Senlin API receives this POST request, parses the authentication
-information and then makes a ``receiver_notify`` RPC call to the Senlin
-engine.
-
-The Senlin engine receives the RPC call, claims message(s) from Zaqar and
-then builds action(s) based on the payload contained in the message body. A
-message will be ignored if any one of the following conditions is met:
-
-  - the ``cluster`` or the ``action`` field cannot be found in the message
-    body;
-  - the targeted cluster cannot be found;
-  - the targeted cluster is not owned by the receiver owner and the receiver
-    owner does not have the "``admin``" role in the project;
-  - the provided action is not applicable on a cluster.
-
-The newly created action(s) will then be scheduled to run to perform the
-expected operation.
-
-Credentials
-~~~~~~~~~~~
-
-Webhook Receiver
-----------------
-
-When requesting to create a ``webhook`` receiver, the requester can choose
-to provide some credentials by specifying the ``actor`` property of the
-receiver. This information will be used for invoking the webhook in the
-future. There are several options to provide these credentials.
-
-If the ``credentials`` to use are explicitly specified, Senlin will save
-them in the receiver DB record. When the webhook is invoked later, the saved
-credentials will be used for authentication with Keystone. The Senlin engine
-won't check if the provided credentials actually work when creating the
-receiver. The check is postponed to the moment when the receiver is
-triggered.
-
-If the ``credentials`` to use are not explicitly provided, Senlin will
-assume that the receiver will be triggered in the future using the
-requester's credential. To make sure the future authentication succeeds, the
-Senlin engine will extract the ``user`` ID from the invoking context and
-create a trust between the user and the ``senlin`` service account, just as
-Senlin does for other operations.
-
-The requester must either be the owner of the targeted cluster or have the
-``admin`` role in the project. This is enforced by the policy middleware.
-If the requester is the ``admin`` of the project, the Senlin engine will use
-the cluster owner's credentials (i.e. a trust with the Senlin user in this
-case).
-
-
-Message Receiver
-----------------
-
-When requesting to create a ``message`` receiver, the requester does not
-need to provide any extra credentials. However, to enable token-based
-authentication for Zaqar message notifications, Zaqar trustee user
-information like ``auth_type``, ``auth_url``, ``username``, ``password``,
-``project_name``, ``user_domain_name``, ``project_domain_name``, etc. must
-be configured in the Senlin configuration file. By default, the Zaqar
-trustee user is the same as the Zaqar service user, for example "zaqar".
-However, operators are also allowed to specify another dedicated user as the
-Zaqar trustee user for message notification. Therefore, please ensure that
-the Zaqar trustee user information defined in senlin.conf is identical to
-that defined in zaqar.conf.
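-
-For reference, triggering a webhook receiver from the command line can be as
-simple as the following (illustrative; the URL is the ``alarm_url`` value
-returned when the receiver was created)::
-
-    $ curl -X POST 'http://{host:port}/v1/webhooks/{webhook_id}/trigger?V=2'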
diff --git a/doc/source/contributor/reviews.rst b/doc/source/contributor/reviews.rst
deleted file mode 100644
index 02af38aac..000000000
--- a/doc/source/contributor/reviews.rst
+++ /dev/null
@@ -1,42 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-=======
-Reviews
-=======
-
-About Global Requirements
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When reviewing patches proposed by `OpenStack Proposal Bot`, we often
-quickly approve them if the patch has successfully passed the gate jobs.
-However, we should realize that these patches may bring in improvements or
-radical changes to the packages senlin imports.
-
-A more appropriate workflow is to check the version changes proposed in such
-patches and examine the git log of each particular package. If there are
-significant changes that may simplify the senlin code base, we should
-propose at least a TODO item that writes down the changes needed for senlin
-to adapt to the new package.
-
-
-About Trivial Changes
-~~~~~~~~~~~~~~~~~~~~~
-
-There are always disagreements across the community about trivial changes
-such as grammar fixes, misspelling fixes in comments, etc. These changes are
-in general okay to get merged, BUT our core reviewers should be aware that
-this behavior is not encouraged. When we notice such behavior from some
-developers, it is our responsibility to guide these developers to submit
-more useful patches. We are not supposed to reject such changes as a
-punishment or anything like that. We are about building great software with
-a great team.
diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst
deleted file mode 100644
index f5fb8a1bc..000000000
--- a/doc/source/contributor/testing.rst
+++ /dev/null
@@ -1,338 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-==============
-Senlin testing
-==============
-
-Overview of Testing
-~~~~~~~~~~~~~~~~~~~
-
-The Senlin project currently has five different types of testing facilities
-in place for developers to perform different kinds of tests:
-
-- *Unit Tests*: These are source-code-level tests that verify that the
-  classes and methods behave as implemented. Once implemented, these tests
-  are also used to guarantee that code behavior won't be changed
-  accidentally by other patches.
-- *API Tests*: These tests treat the *senlin-api* and the *senlin-engine*
-  as black boxes. The test cases focus more on the API surface than on how
-  each API is implemented.
Once implemented, these tests help ensure
-  that the user-visible service interface doesn't change without a good
-  reason.
-- *Functional Tests*: These tests also treat the *senlin-api* and the
-  *senlin-engine* as black boxes. They focus more on the user-perceivable
-  service behavior. Most tests are anticipated to test a particular "story"
-  and verify that the *senlin-engine* always behaves consistently.
-- *Integration Tests*: These are the tests that integrate senlin with other
-  OpenStack services and verify the senlin service can perform its
-  operations correctly when interacting with other services.
-- *Stress Tests*: These are tests for measuring the performance of the
-  *senlin-api* and *senlin-engine* under different workloads.
-
-
-Cloud Backends
-~~~~~~~~~~~~~~
-
-The senlin server is shipped with two collections of "cloud backends": one
-for interacting with a real OpenStack deployment, the other for running
-complex tests including API tests, functional tests and stress tests. The
-first cloud backend is referred to as '`openstack`' and the second is
-referred to as '`openstack_test`'. While the `openstack` cloud backend
-contains full-featured drivers for senlin to talk to the OpenStack services
-supported, the `openstack_test` backend contains some "dummy" drivers that
-return fake responses for service requests. The `openstack_test` driver is
-located in the :file:`senlin/tests/drivers` subdirectory. It is provided to
-facilitate tests on the senlin service itself without involving any other
-OpenStack services. Several types of tests can benefit from these "dummy"
-drivers because 1) they can save developers a lot of time on debugging
-complex issues when interacting with other OpenStack services, and 2) they
-make running those types of tests much easier and quicker.
-
-Note that "Integration Tests" are designed for senlin to interact with real
-services, so we should use the `openstack` backend rather than the
-`openstack_test` backend.
-
-To configure the backend to use before running tests, you can check the
-`[DEFAULT]` section in the configuration file
-:file:`/etc/senlin/senlin.conf`.
-
-::
-
-    [DEFAULT]
-    cloud_backend = openstack_test # use this for api, functional tests;
-                                   # or 'openstack' for production environment
-                                   # and integration tests.
-
-
-Unit Tests
-~~~~~~~~~~
-
-All unit tests are to be placed in the :file:`senlin/tests/unit`
-sub-directory. Test cases are organized by the targeted subsystems/modules.
-Each subsystem directory must contain a separate blank ``__init__.py`` file
-for test discovery to function properly.
-
-An example directory structure::
-
-    senlin
-    `- tests
-       `- unit
-          |-- db
-          |   |-- __init__.py
-          |   |-- test_cluster_api.py
-          |   `-- test_node_api.py
-          |-- engine
-          |   |-- __init__.py
-          |   |-- test_clusters.py
-          |   `-- test_nodes.py
-          |-- __init__.py
-          `-- test_utils.py
-
-
-Writing a Unit Test
--------------------
-
-The *os-testr* software (see: https://pypi.org/project/os-testr/) is used to
-find and run tests, parallelize their runs, and record timing/results.
-
-If new dependencies are introduced during the development of a test, the
-`test-requirements.txt` file needs to be updated so that the virtual
-environment will be able to successfully execute all tests.
-
-The `test-requirements.txt` file needs to be synchronized with the
-openstack/global-requirements project. Developers should try to avoid
-introducing additional package dependencies unless forced to.
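-
-For orientation, a minimal unit test module might look like the sketch
-below. This is illustrative only; real senlin unit tests typically derive
-from common base classes found under :file:`senlin/tests/unit`.
-
-.. code-block:: python
-
-    import testtools
-
-
-    class TestSimpleMath(testtools.TestCase):
-        """A trivial example of a testtools-based unit test."""
-
-        def test_addition(self):
-            # Test method names must start with 'test_' to be discovered.
-            self.assertEqual(4, 2 + 2)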
-
-
-Running Unit Tests
-------------------
-
-Senlin uses `tox` for running unit tests, as practiced by many other
-OpenStack projects::
-
-    $ tox
-
-By default, this will run the unit test suite with Python 2.7 as well as the
-PEP8/HACKING style checks. To run only one type of test, you can explicitly
-provide `tox` with the test environment to use::
-
-    $ tox -e py27 # test suite on python 2.7
-    $ tox -e pep8 # run full source code checker
-
-To run only a subset of tests, you can provide `tox` with a regex argument::
-
-    $ tox -e py27 -- -r ClusterTest
-
-To use a debugger like `pdb` during a test run, you have to run the tests
-directly with another, non-concurrent test runner instead of `testr`.
-That also presumes that you have a virtual env with all senlin dependencies
-installed and configured.
-
-A more convenient way to run a specific test is to name the unit test
-directly, as shown below::
-
-    $ python -m testtools.run senlin.tests.unit.db.test_cluster_api
-
-This command, however, does not use the dependent packages in a particular
-virtual environment as the `tox` command does; it uses the system-wide
-Python package repository when running the tests.
-
-
-API Tests
-~~~~~~~~~
-
-Senlin API test cases are written based on the *tempest* framework (see:
-`tempest_overview`_). Test cases are developed using the Tempest Plugin
-Interface (see: `tempest_plugin`_).
-
-
-Writing an API Test Case
-------------------------
-
-API tests are hosted in the `senlin-tempest-plugin` project. When new APIs
-are added or existing APIs are changed, an API test case should be added to
-the :file:`senlin_tempest_plugin/tests/api` sub-directory, based on the
-resources impacted by the change.
-
-Each test case should derive from the class
-:class:`senlin_tempest_plugin.tests.api.base.BaseSenlinAPITest`. Positive
-test cases should be separated from negative ones. We don't encourage
-combining more than one test case into a single method, unless there is an
-obvious reason.
-
-To improve the readability of the test cases, Senlin has provided a utility
-module which can be leveraged - :file:`senlin_tempest_plugin/common/utils.py`.
-
-
-Running API Tests
------------------
-
-Senlin API tests use fake OpenStack drivers to improve the throughput of
-test execution. This is because in API tests, we don't care about the
-details of how the *senlin-engine* is interacting with other services. We
-care more about whether the APIs succeed in an expected way or fail in a
-predictable manner.
-
-Although the senlin engine is talking to fake drivers, the test cases still
-need to communicate with the senlin API service as they would in a real
-deployment. That means you will have to export your OpenStack credentials
-before running the tests. For example, you will source the :file:`openrc`
-file when using a devstack environment::
-
-    $ . $HOME/devstack/openrc
-
-This will ensure you have environment variables such as ``OS_AUTH_URL``,
-``OS_USERNAME`` properly set and exported. The next step is to enter the
-:file:`tempest` directory and run the tests there::
-
-    $ cd /opt/stack/tempest
-    $ nosetests -v -- senlin
-
-To run a single test case, you can specify the test case name.
For example::
-
-    $ cd /opt/stack/tempest
-    $ nosetests -v -- \
-        senlin_tempest_plugin.tests.api.clusters.test_cluster_create
-
-If you prefer running API tests in a virtual environment, you can simply use
-the following command::
-
-    $ cd /opt/stack/senlin
-    $ tox -e api
-
-
-Functional Tests
-~~~~~~~~~~~~~~~~
-
-Similar to the API tests, senlin functional tests are also developed based
-on the *tempest* framework. Test cases are written using the Tempest Plugin
-Interface (see: `tempest_plugin`_).
-
-.. _`tempest_overview`: https://docs.openstack.org/tempest/latest/
-.. _`tempest_plugin`: https://docs.openstack.org/tempest/latest/plugin
-
-
-Writing Functional Tests
-------------------------
-
-Functional tests are hosted in the `senlin-tempest-plugin` project. There is
-currently a limited collection of functional test cases, which can be found
-under the :file:`senlin_tempest_plugin/tests/functional/` subdirectory. In
-the future, we may add more test cases when needed. The above subdirectory
-will remain the home of newly added functional tests.
-
-When writing functional tests, it is highly desirable that each test case is
-designed for a specific use case or story line.
-
-
-Running Functional Tests
-------------------------
-
-Similar to API tests, you will need to export your OpenStack credentials
-before running any functional tests.
-
-The most straightforward way to run functional tests is to use the virtual
-environment defined in the :file:`tox.ini` file, that is::
-
-    $ cd /opt/stack/senlin
-    $ tox -e functional
-
-If you prefer running a particular functional test case, you can do the
-following as well::
-
-    $ cd /opt/stack/senlin
-    $ python -m testtools.run senlin_tempest_plugin.tests.functional.test_cluster_basic
-
-
-Integration Tests
-~~~~~~~~~~~~~~~~~
-
-Integration tests are basically another flavor of functional tests. The only
-difference from functional tests is that integration tests use real drivers,
-so the *senlin-engine* is talking to real services.
-
-
-Writing Integration Tests
--------------------------
-
-Integration tests are hosted in the `senlin-tempest-plugin` project.
-Integration tests are designed to be run at the Gerrit gate to ensure that
-changes to senlin code won't break its interactions with other (backend)
-services. Since the OpenStack gate infrastructure is a shared resource pool
-for all OpenStack projects, we are supposed to be very careful when adding
-new test cases. The test cases added are supposed to focus more on the
-interactions between senlin and other services than on anything else.
-
-All integration test cases are to be placed under the subdirectory
-:file:`senlin_tempest_plugin/tests/integration`. Test cases are expected to
-be organized into a small number of story lines that can exercise as many
-interactions between senlin and backend services as possible.
-
-Each "story line" should be organized into a separate class module that
-inherits from the ``BaseSenlinIntegrationTest`` class which can be found in
-the :file:`senlin_tempest_plugin/tests/integration/base.py` file. Each test
-case should be annotated with a ``decorators.attr`` annotator and an
-idempotent ID as shown below:
-
-.. code-block:: python
-
-    from tempest.lib import decorators
-
-    from senlin_tempest_plugin.tests.integration import base
-
-
-    class MyIntegrationTest(base.BaseSenlinIntegrationTest):
-
-        @decorators.attr(type=['integration'])
-        @decorators.idempotent_id('')
-        def test_a_sad_story(self):
-            # Test logic goes here
-            # ...
- - -Running Integration Tests -------------------------- - -The integration tests are designed to be executed at Gerrit gate. However, you -can still run them locally in your development environment, i.e. a devstack -installation. - -To run integration tests, you will need to configure *tempest* accounts by -editing the :file:`/etc/tempest/accounts.yaml` file. For each entry of the -tempest account, you will need to provide values for ``username``, -``tenant_name``, ``password`` at least. For example: - -.. code-block:: yaml - - - username: 'demo' - tenant_name: 'demo' - password: 'secretee' - -After this is configured, you can run a specific test case using the following -command: - -.. code-block:: console - - $ cd /opt/stack/senlin - $ python -m testtools.run \ - senlin_tempest_plugin.tests.integration.test_nova_server_cluster - - -Writing Stress Test Cases -------------------------- - - - - -Running Stress Tests --------------------- - - diff --git a/doc/source/ext/__init__.py b/doc/source/ext/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/doc/source/ext/resources.py b/doc/source/ext/resources.py deleted file mode 100644 index b02800005..000000000 --- a/doc/source/ext/resources.py +++ /dev/null @@ -1,291 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -*- coding: utf-8 -*- - -from docutils import nodes -from docutils.parsers import rst -from docutils.parsers.rst import directives -from functools import cmp_to_key -from oslo_utils import importutils -from sphinx.util import logging - -from senlin.common import schema - -LOG = logging.getLogger(__name__) - - -class SchemaDirective(rst.Directive): - required_arguments = 0 - optional_arguments = 0 - final_argument_whitespace = True - option_spec = {'package': directives.unchanged} - has_content = False - add_index = True - section_title = 'Spec' - properties_only = False - - def run(self): - """Build doctree nodes consisting for the specified schema class - - :returns: doctree node list - """ - - # gives you access to the options of the directive - options = self.options - - content = [] - - # read in package class - obj = importutils.import_class(options['package']) - - # skip other spec properties if properties_only is True - if not self.properties_only: - section = self._create_section(content, 'spec', - title=self.section_title) - - # create version section - version_section = self._create_section(section, 'version', - title='Latest Version') - field = nodes.line('', obj.VERSION) - version_section.append(field) - - # build versions table - version_tbody = self._build_table( - section, 'Available Versions', - ['Version', 'Status', 'Supported Since']) - sorted_versions = sorted(obj.VERSIONS.items()) - for version, support_status in sorted_versions: - for support in support_status: - cells = [version] - sorted_support = sorted(support.items(), reverse=True) - cells += [x[1] for x in sorted_support] - self._create_table_row(cells, version_tbody) - - # create applicable profile types - profile_type_description = ('This policy is designed to handle ' - 'the following profile types:') - profile_type_section = self._create_section( - section, 'profile_types', title='Applicable Profile Types') - field = nodes.line('', profile_type_description) - profile_type_section.append(field) - for profile_type in obj.PROFILE_TYPE: - profile_type_section += self._create_list_item(profile_type) - - # create actions handled - policy_trigger_description = ('This policy is triggered by the ' - 'following actions during the ' - 'respective phases:') - target_tbody = self._build_table( - section, 'Policy Triggers', - ['Action', 'Phase'], - policy_trigger_description - ) - sorted_targets = sorted(obj.TARGET, key=lambda tup: tup[1]) - for phase, action in sorted_targets: - cells = [action, phase] - self._create_table_row(cells, target_tbody) - - # build properties - properties_section = self._create_section(section, 'properties', - title='Properties') - else: - properties_section = content - - sorted_schema = sorted(obj.properties_schema.items(), - key=cmp_to_key(self._sort_by_type)) - for k, v in sorted_schema: - self._build_properties(k, v, properties_section) - - # we return the result - return content - - def _create_section(self, parent, sectionid, title=None, term=None): - """Create a new section - - :returns: If term is specified, returns a definition node contained - within the newly created section. Otherwise return the newly created - section node. 
- """ - - idb = nodes.make_id(sectionid) - section = nodes.section(ids=[idb]) - parent.append(section) - - if term: - if term != '**': - section.append(nodes.term('', term)) - - definition = nodes.definition() - section.append(definition) - - return definition - - if title: - section.append(nodes.title('', title)) - - return section - - def _create_list_item(self, str): - """Creates a new list item - - :returns: List item node - """ - para = nodes.paragraph() - para += nodes.strong('', str) - - item = nodes.list_item() - item += para - - return item - - def _create_def_list(self, parent): - """Creates a definition list - - :returns: Definition list node - """ - - definition_list = nodes.definition_list() - parent.append(definition_list) - - return definition_list - - def _sort_by_type(self, x, y): - """Sort two keys so that map and list types are ordered last.""" - - x_key, x_value = x - y_key, y_value = y - - # if both values are map or list, sort by their keys - if ((isinstance(x_value, schema.Map) or - isinstance(x_value, schema.List)) and - (isinstance(y_value, schema.Map) or - isinstance(y_value, schema.List))): - return (x_key > y_key) - (x_key < y_key) - - # show simple types before maps or list - if (isinstance(x_value, schema.Map) or - isinstance(x_value, schema.List)): - return 1 - - if (isinstance(y_value, schema.Map) or - isinstance(y_value, schema.List)): - return -1 - - return (x_key > y_key) - (x_key < y_key) - - def _create_table_row(self, cells, parent): - """Creates a table row for cell in cells - - :returns: Row node - """ - - row = nodes.row() - parent.append(row) - - for c in cells: - entry = nodes.entry() - row += entry - entry += nodes.literal(text=c) - - return row - - def _build_table(self, section, title, headers, description=None): - """Creates a table with given title, headers and description - - :returns: Table body node - """ - - table_section = self._create_section(section, title, title=title) - - if description: - field = nodes.line('', description) - table_section.append(field) - - table = nodes.table() - tgroup = nodes.tgroup(len(headers)) - table += tgroup - - table_section.append(table) - - for _ in headers: - tgroup.append(nodes.colspec(colwidth=1)) - - # create header - thead = nodes.thead() - tgroup += thead - self._create_table_row(headers, thead) - - tbody = nodes.tbody() - tgroup += tbody - - # create body consisting of targets - tbody = nodes.tbody() - tgroup += tbody - - return tbody - - def _build_properties(self, k, v, definition): - """Build schema property documentation - - :returns: None - """ - - if isinstance(v, schema.Map): - newdef = self._create_section(definition, k, term=k) - - if v.schema is None: - # if it's a map for arbritary values, only include description - field = nodes.line('', v.description) - newdef.append(field) - return - - newdeflist = self._create_def_list(newdef) - - sorted_schema = sorted(v.schema.items(), - key=cmp_to_key(self._sort_by_type)) - for key, value in sorted_schema: - self._build_properties(key, value, newdeflist) - elif isinstance(v, schema.List): - newdef = self._create_section(definition, k, term=k) - - # identify next section as list properties - field = nodes.line() - emph = nodes.emphasis('', 'List properties:') - field.append(emph) - newdef.append(field) - - newdeflist = self._create_def_list(newdef) - - self._build_properties('**', v.schema['*'], newdeflist) - else: - newdef = self._create_section(definition, k, term=k) - if 'description' in v: - field = nodes.line('', v['description']) - 
newdef.append(field) - else: - field = nodes.line('', '++') - newdef.append(field) - - -class SchemaProperties(SchemaDirective): - properties_only = True - - -class SchemaSpec(SchemaDirective): - section_title = 'Spec' - properties_only = False - - -def setup(app): - app.add_directive('schemaprops', SchemaProperties) - app.add_directive('schemaspec', SchemaSpec) diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index d4c30119a..000000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,226 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================================== -Welcome to the Senlin documentation! -==================================== - -1 Introduction -~~~~~~~~~~~~~~ - -Senlin is a service to create and manage :term:`Cluster` objects consisting -of multiple cloud resources. Senlin provides an OpenStack-native REST API; an -AWS AutoScaling-compatible Query API is planned. - -.. toctree:: - :maxdepth: 1 - - overview - install/index - configuration/index - -2 Tutorial -~~~~~~~~~~ - -This tutorial walks you through the Senlin features step-by-step. For more -details, please check the :ref:`user-references` section. - -.. toctree:: - :maxdepth: 1 - - tutorial/basics - tutorial/policies - tutorial/receivers - tutorial/autoscaling - -.. _user-references: - -3 User References -~~~~~~~~~~~~~~~~~ - -This section provides detailed documentation for the concepts and built-in -policy types. - -3.1 Basic Concepts ------------------- - -.. toctree:: - :maxdepth: 1 - - user/profile_types - user/profiles - user/clusters - user/nodes - user/membership - user/policy_types - user/policies - user/bindings - user/receivers - user/actions - user/events - -3.2 Built-in Policy Types ------------------------- - -The senlin service is released with some built-in policy types that target -some common use cases. You can develop and deploy your own policy types by -following the instructions in the :ref:`developer-guide` section. - -The following is a list of builtin policy types: - -.. toctree:: - :maxdepth: 1 - - user/policy_types/affinity - user/policy_types/batch - user/policy_types/deletion - user/policy_types/health - user/policy_types/load_balancing - user/policy_types/scaling - user/policy_types/region_placement - user/policy_types/zone_placement - -3.3 Built-in Profile Types -------------------------- - -The senlin service is released with some built-in profile types that target -some common use cases. You can develop and deploy your own profile types by -following the instructions in the :ref:`developer-guide` section. - -The following is a list of builtin profile types: - -.. toctree:: - :maxdepth: 1 - - user/profile_types/nova - user/profile_types/stack - user/profile_types/docker - -4 Usage Scenarios -~~~~~~~~~~~~~~~~~ - -This section provides some guides for typical usage scenarios. More scenarios -are to be added. - -4.1 Managing Node Affinity -------------------------- - -Senlin provides an :doc:`Affinity Policy ` for -managing node affinity.
This section contains a detailed introduction on how -to use it. - -.. toctree:: - :maxdepth: 1 - - scenarios/affinity - -4.2 Building AutoScaling Clusters ---------------------------------- - -.. toctree:: - :maxdepth: 1 - - scenarios/autoscaling_overview - scenarios/autoscaling_ceilometer - scenarios/autoscaling_heat - - -.. _developer-guide: - -5 Developer's Guide -~~~~~~~~~~~~~~~~~~~ - -This section targets senlin developers. - -5.1 Understanding the Design ---------------------------- - -.. toctree:: - :maxdepth: 1 - - contributor/api_microversion - contributor/authorization - contributor/profile - contributor/cluster - contributor/node - contributor/policy - contributor/action - contributor/receiver - contributor/testing - contributor/plugin_guide - contributor/osprofiler - -5.2 Built-in Policy Types ------------------------- - -Senlin provides some built-in policy types which can be instantiated and then -attached to your clusters. These policy types are designed to be orthogonal so -that each of them can be used independently. They are also expected to work -in a collaborative way to meet the needs of complicated usage scenarios. - -.. toctree:: - :maxdepth: 1 - - contributor/policies/affinity_v1 - contributor/policies/deletion_v1 - contributor/policies/health_v1 - contributor/policies/load_balance_v1 - contributor/policies/region_v1 - contributor/policies/scaling_v1 - contributor/policies/zone_v1 - -5.3 Reviewing Patches --------------------- - -There are many general guidelines across the community about code reviews, for -example: - -- `Code review guidelines (wiki)`_ -- `OpenStack developer's guide`_ - -Besides these guidelines, senlin has some additional conventions, based on -daily review experience, that should be practiced. - -.. toctree:: - :maxdepth: 1 - - contributor/reviews - -6 Administering Senlin -~~~~~~~~~~~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - admin/index - - -7 References -~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - reference/man/index - reference/glossary - reference/api - - -Indices and tables ------------------ - -* :ref:`genindex` -* :ref:`search` - -.. _`Code review guidelines (wiki)`: https://wiki.openstack.org/wiki/CodeReviewGuidelines -.. _`OpenStack developer's guide`: https://docs.openstack.org/infra/manual/developers.html diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst deleted file mode 100644 index 349d5c71f..000000000 --- a/doc/source/install/index.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================= -Installing Senlin -================= - -.. toctree:: - :maxdepth: 2 - - install-devstack.rst - install-source.rst - install-rdo.rst - verify.rst - -This chapter assumes a working setup of OpenStack following the -`OpenStack Installation Tutorial `_. - diff --git a/doc/source/install/install-devstack.rst b/doc/source/install/install-devstack.rst deleted file mode 100644 index a980d83a7..000000000 --- a/doc/source/install/install-devstack.rst +++ /dev/null @@ -1,45 +0,0 @@ -..
- Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _install-devstack: - -==================== -Install via Devstack -==================== - -This is the recommended way to install the Senlin service. Please refer to the -following detailed instructions. - -1. Download DevStack:: - - $ git clone https://git.openstack.org/openstack-dev/devstack - $ cd devstack - -2. Add the following plugins as external repositories in your ``local.conf`` file:: - - [[local|localrc]] - #Enable senlin - enable_plugin senlin https://git.openstack.org/openstack/senlin - #Enable senlin-dashboard - enable_plugin senlin-dashboard https://git.openstack.org/openstack/senlin-dashboard - -Optionally, you can add a line ``SENLIN_USE_MOD_WSGI=True`` to the same ``local.conf`` -file if you prefer running the Senlin API service under Apache. - -3. Run ``./stack.sh``:: - - $ ./stack.sh - -Note that the Senlin client is also installed when following these instructions. - - diff --git a/doc/source/install/install-rdo.rst b/doc/source/install/install-rdo.rst deleted file mode 100644 index a5e005803..000000000 --- a/doc/source/install/install-rdo.rst +++ /dev/null @@ -1,246 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _install-rdo: - -=============== -Install via RDO -=============== - -This section describes how to install and configure the Senlin service -for Red Hat Enterprise Linux 7 and CentOS 7. - -This guide applies to the ``pike`` release and later. - -Prerequisites ------------- - -Before you install and configure Senlin, you must create a -database, service credentials, and API endpoints. Senlin also -requires additional information in the Identity service. - -1. To create the database, complete these steps: - -* Use the database access client to connect to the database - server as the ``root`` user: - -:: - - $ mysql -u root -p - -* Create the ``senlin`` database: - -:: - - CREATE DATABASE senlin DEFAULT CHARACTER SET utf8; - -* Grant proper access to the ``senlin`` database: - -:: - - GRANT ALL ON senlin.* TO 'senlin'@'localhost' \ - IDENTIFIED BY 'SENLIN_DBPASS'; - GRANT ALL ON senlin.* TO 'senlin'@'%' \ - IDENTIFIED BY 'SENLIN_DBPASS'; - -Replace ``SENLIN_DBPASS`` with a suitable password. - -* Exit the database access client. - -2. Source the ``admin`` credentials to gain access to - admin-only CLI commands: - -:: - - $ . admin-openrc - -3.
To create the service credentials, complete these steps: - -* Create the ``senlin`` user: - -:: - - $ openstack user create --project service --password-prompt senlin - User Password: - Repeat User Password: - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | e0353a670a9e496da891347c589539e9 | - | enabled | True | - | id | ca2e175b851943349be29a328cc5e360 | - | name | senlin | - +-----------+----------------------------------+ - -* Add the ``admin`` role to the ``senlin`` user: - -:: - - $ openstack role add --project service --user senlin admin - - .. note:: - - This command provides no output. - -* Create the ``senlin`` service entities: - -:: - - $ openstack service create --name senlin \ - --description "Senlin Clustering Service V1" clustering - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Senlin Clustering Service V1 | - | enabled | True | - | id | 727841c6f5df4773baa4e8a5ae7d72eb | - | name | senlin | - | type | clustering | - +-------------+----------------------------------+ - -4. Create the senlin service API endpoints: - -:: - - $ openstack endpoint create senlin --region RegionOne \ - public http://controller:8777 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 90485e3442544509849e3c79bf93c15d | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 9130295921b04601a81f95c417b9f113 | - | service_name | senlin | - | service_type | clustering | - | url | http://controller:8777 | - +--------------+----------------------------------+ - - $ openstack endpoint create senlin --region RegionOne \ - admin http://controller:8777 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | d4a9f5a902574479a73e520dd3f93dfb | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 9130295921b04601a81f95c417b9f113 | - | service_name | senlin | - | service_type | clustering | - | url | http://controller:8777 | - +--------------+----------------------------------+ - - $ openstack endpoint create senlin --region RegionOne \ - internal http://controller:8777 - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | d119b192857e4760a196ba2b88d20bc6 | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 9130295921b04601a81f95c417b9f113 | - | service_name | senlin | - | service_type | clustering | - | url | http://controller:8777 | - +--------------+----------------------------------+ - -Install and configure components -------------------------------- - -.. note:: - - Default configuration files vary by distribution. You might need - to add these sections and options rather than modifying existing - sections and options. Also, an ellipsis (``...``) in the configuration - snippets indicates potential default configuration options that you - should retain. - -1.
Install the packages: - -:: - - # yum install openstack-senlin-api.noarch \ - openstack-senlin-common.noarch \ - openstack-senlin-conductor.noarch \ - openstack-senlin-engine.noarch \ - openstack-senlin-health-manager.noarch \ - python3-senlinclient.noarch - -2. Edit the file :file:`/etc/senlin/senlin.conf` according to your system settings. The most common options to be customized include: - -:: - - [database] - connection = mysql+pymysql://senlin:@127.0.0.1/senlin?charset=utf8 - - [keystone_authtoken] - service_token_roles_required = True - auth_type = password - user_domain_name = Default - project_domain_name = Default - project_name = service - username = senlin - password = - www_authenticate_uri = http:///identity/v3 - auth_url = http:///identity - - [authentication] - auth_url = http://:5000/v3 - service_username = senlin - service_password = - service_project_name = service - - [oslo_messaging_rabbit] - rabbit_userid = - rabbit_hosts = - rabbit_password = - - [oslo_messaging_notifications] - driver = messaging - -For more comprehensive help on configuration options, please refer to the -:doc:`Configuration Options ` documentation. - - -3. Populate the Senlin database: - -:: - - # senlin-manage db_sync - - .. note:: - - Ignore any deprecation messages in this output. - -Finalize installation --------------------- - -* Start the Senlin services and configure them to start - when the system boots: - -:: - - # systemctl enable openstack-senlin-api.service \ - openstack-senlin-conductor.service \ - openstack-senlin-engine.service \ - openstack-senlin-health-manager.service - # systemctl start openstack-senlin-api.service \ - openstack-senlin-conductor.service \ - openstack-senlin-engine.service \ - openstack-senlin-health-manager.service diff --git a/doc/source/install/install-source.rst b/doc/source/install/install-source.rst deleted file mode 100644 index 7076918e9..000000000 --- a/doc/source/install/install-source.rst +++ /dev/null @@ -1,145 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _install-source: - -============================ -Install from Git Source Code -============================ - -Install Senlin Server --------------------- - -1. Get the Senlin source code from the OpenStack git repository. - -:: - - $ cd /opt/stack - $ git clone https://git.openstack.org/openstack/senlin.git - -2. Install Senlin with required packages. - -:: - - $ cd /opt/stack/senlin - $ sudo pip install -e . - -3. Register the Senlin clustering service with keystone. - - This can be done using the :command:`setup-service` script under the - :file:`tools` folder. - - **NOTE:** This assumes you have devstack installed under the - :file:`/opt/devstack` folder. - -:: - - $ . /opt/devstack/openrc admin admin - $ cd /opt/stack/senlin/tools - $ ./setup-service - -4. Generate a configuration file for the Senlin service.
- -:: - - $ cd /opt/stack/senlin - $ tools/gen-config - $ sudo mkdir /etc/senlin - $ sudo cp etc/senlin/api-paste.ini /etc/senlin - $ sudo cp etc/senlin/senlin.conf.sample /etc/senlin/senlin.conf - -Edit the file :file:`/etc/senlin/senlin.conf` according to your system settings. -The most common options to be customized include: - -:: - - [database] - connection = mysql+pymysql://senlin:@127.0.0.1/senlin?charset=utf8 - - [keystone_authtoken] - service_token_roles_required = True - auth_type = password - user_domain_name = Default - project_domain_name = Default - project_name = service - username = senlin - password = - www_authenticate_uri = http:///identity/v3 - auth_url = http:///identity - - [authentication] - auth_url = http://:5000/v3 - service_username = senlin - service_password = - service_project_name = service - - [oslo_messaging_rabbit] - rabbit_userid = - rabbit_hosts = - rabbit_password = - - [oslo_messaging_notifications] - driver = messaging - -For more comprehensive help on configuration options, please refer to the -:doc:`Configuration Options ` documentation. - -If you want to modify the access policies of Senlin, please generate the sample -policy file, copy it to `/etc/senlin/policy.yaml` and then update it. - -:: - - $ cd /opt/stack/senlin - $ tools/gen-policy - $ sudo cp etc/senlin/policy.yaml.sample /etc/senlin/policy.yaml - -5. Create the Senlin Database. - -Create the Senlin database using the :command:`senlin-db-recreate` script under -the :file:`tools` subdirectory. Before calling the script, you need to edit it -to customize the password you will use for the ``senlin`` user. You need to -update this script with the password entered in step 4. - -:: - - $ cd /opt/stack/senlin/tools - $ ./senlin-db-recreate - -6. Start the senlin api, conductor, engine and health-manager services. - -You may need multiple consoles for the services, i.e. one for each service. - -:: - - $ senlin-conductor --config-file /etc/senlin/senlin.conf - $ senlin-engine --config-file /etc/senlin/senlin.conf - $ senlin-health-manager --config-file /etc/senlin/senlin.conf - $ senlin-api --config-file /etc/senlin/senlin.conf - -Install Senlin Client --------------------- - -1. Get the Senlin client code from the OpenStack git repository. - -:: - - $ cd /opt/stack - $ git clone https://git.openstack.org/openstack/python-senlinclient.git - -2. Install the senlin client. - -:: - - $ cd python-senlinclient - $ sudo python setup.py install - diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst deleted file mode 100644 index 4f2515bc6..000000000 --- a/doc/source/install/verify.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _verify: - -======================== -Verify Your Installation -======================== - -Verify operation of the Cluster service. - - -.. note:: - - Perform these commands on the controller node. - -#. Source the ``admin`` tenant credentials: - - .. code-block:: console - - $ . admin-openrc - -#.
List service components to verify successful launch and - registration of each process: - - .. code-block:: console - - $ openstack cluster build info - +--------+---------------------+ - | Field | Value | - +--------+---------------------+ - | api | { | - | | "revision": "1.0" | - | | } | - | engine | { | - | | "revision": "1.0" | - | | } | - +--------+---------------------+ - -You are now ready to begin your journey (a.k.a. adventure) with Senlin. diff --git a/doc/source/overview.rst b/doc/source/overview.rst deleted file mode 100644 index 2be2c1d3f..000000000 --- a/doc/source/overview.rst +++ /dev/null @@ -1,80 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _guide-overview: - -======== -Overview -======== - -Senlin is a **clustering service** for OpenStack clouds. It creates and -operates clusters of homogeneous objects exposed by other OpenStack services. -The goal is to make orchestration of collections of similar objects easier. - -Senlin interacts with other OpenStack services so that clusters of resources -exposed by those services can be created and operated. These interactions are -mostly done via :term:`profile` plugins. Each profile type -implementation enables Senlin to create, update and delete a specific type of -resource. - -A :term:`Cluster` can be associated with different :term:`Policy` objects -that can be checked/enforced at varying enforcement levels. Through service -APIs, a user can dynamically add a :term:`Node` to and remove a node from a -cluster, and attach and detach policies, such as *creation policy*, *deletion -policy*, *load-balancing policy*, *scaling policy*, *health policy* etc. -Through integration with other OpenStack projects, users will be able to -manage deployments and orchestrate large-scale resource pools much more -easily. - -Senlin is designed to be capable of managing different types of objects. An -object's lifecycle is managed using :term:`Profile Type` implementations, -which are plugins that can be dynamically loaded by the service engine. - -Components -~~~~~~~~~~ - -The developers are focusing on creating an OpenStack-style project following -OpenStack design tenets, implemented in Python. We started with close -interaction with the Heat project. - -senlinclient ------------- - -The :program:`senlinclient` package provides a plugin for the openstackclient -tool so you have a command line interface to communicate with -the :program:`senlin-api` to manage clusters, nodes, profiles, policies, -actions and events. End developers can also use the Senlin REST API directly. - -senlin-dashboard ----------------- -The :program:`senlin-dashboard` is a Horizon plugin that provides a UI for -senlin. - -senlin-api ---------- - -The :program:`senlin-api` component provides an OpenStack-native REST API that -processes API requests by sending them to the :program:`senlin-engine` over RPC. - -senlin-engine ------------- - -The :program:`senlin-engine`'s main responsibility is to create and orchestrate -the clusters, nodes, profiles and policies.
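To see how these components fit together from a user's perspective, the following is a minimal sketch of a senlinclient session; the spec file :file:`my_server.yaml` and the names ``myprofile`` and ``mycluster`` are hypothetical, and the commands assume a deployed senlin service:

.. code-block:: console

   $ # create a profile from a spec file, then a cluster based on it
   $ openstack cluster profile create --spec-file my_server.yaml myprofile
   $ openstack cluster create --profile myprofile --desired-capacity 2 mycluster
   $ # list clusters to check the result
   $ openstack cluster list

Each of these commands is handled by :program:`senlin-api`, which turns it into an RPC request that :program:`senlin-engine` executes asynchronously.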
- - -Installation -~~~~~~~~~~~~ - -You will need to make sure you have a suitable environment for deploying -Senlin. Please refer to :doc:`Installation ` for detailed -instructions on setting up an environment to use the Senlin service. diff --git a/doc/source/reference/api.rst b/doc/source/reference/api.rst deleted file mode 100644 index 45c84435e..000000000 --- a/doc/source/reference/api.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -API Documentation ------------------ - -Follow the link below for the Senlin API V1 specification: - -- `OpenStack API Complete Reference - Clustering`_ - - - -.. _`OpenStack API Complete Reference - Clustering`: https://docs.openstack.org/api-ref/clustering/ diff --git a/doc/source/reference/glossary.rst b/doc/source/reference/glossary.rst deleted file mode 100644 index 3eda3b798..000000000 --- a/doc/source/reference/glossary.rst +++ /dev/null @@ -1,146 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -======== -Glossary -======== - -This section contains the glossary for the Senlin service. - -.. glossary:: - :sorted: - - Action - An action is an operation that can be performed on a :term:`Cluster` or a - :term:`Node` etc. Different types of objects support different sets of - actions. An action is executed by a :term:`Worker` thread when the action - becomes READY. Most Senlin APIs create actions in the database for worker - threads to execute asynchronously. An action, when executed, will check - and enforce the :term:`Policy` associated with the cluster. An action can be - triggered via a :term:`Receiver`. - - API server - HTTP REST API service for Senlin. - - Cluster - A cluster is a group of homogeneous objects (i.e. :term:`Node`). A - cluster consists of 0 or more nodes and it can be associated with 0 or - more :term:`Policy` objects. It is associated with a :term:`Profile Type` - when created. - - Dependency - The :term:`Action` objects are stored in the database for execution. These - actions may have dependencies among them. - - Dispatcher - A dispatcher is a processor that takes a Senlin :term:`Action` as input - and then converts it into a desired format for storage or further - processing. - - Driver - A driver is a Senlin internal module that enables the Senlin :term:`Engine` to - interact with other :term:`OpenStack` services. The interactions here are - usually used to create, destroy and update the objects exposed by those - services. - - Engine - The daemon that actually performs the operations requested by users.
It - provides RPC interfaces to RPC clients. - - Environment - Used to specify user-provided :term:`Plugin` modules that implement a - :term:`Profile Type` or a :term:`Policy Type`. Users can provide plugins - that override the default plugins by customizing an environment. - - Event - An event is a record left in the Senlin database when something that - matters to users happens. An event can be of different criticality levels. - - Index - An integer property of a :term:`Node` when it is a member of a - :term:`Cluster`. Each node has an auto-generated index value that is - unique in the cluster. - - Node - A node is an object that belongs to at most one :term:`Cluster`. A node - can become an 'orphaned node' when it is not a member of any clusters. - All nodes in a cluster must be of the same :term:`Profile Type` as the - owning cluster. In general, a node represents a physical object exposed - by other OpenStack services. A node has a unique :term:`Index` value - scoped to the cluster that owns it. - - Permission - A string dictating which user (role or group) has what permissions on a - given object (i.e. :term:`Cluster`, :term:`Node`, :term:`Profile` and - :term:`Policy` etc.) - - Plugin - A plugin is an implementation of a :term:`Policy Type` or :term:`Profile - Type` that can be dynamically loaded and registered to the Senlin engine. - The Senlin engine comes with a set of builtin plugins. Users can add their own - plugins by customizing the :term:`Environment` configuration. - - Policy - A policy is a set of rules that can be checked and/or enforced when an - :term:`Action` is performed on a :term:`Cluster`. A policy is an instance - of a particular :term:`Policy Type`. Users can specify the enforcement - level when creating a policy object. Such a policy object can be attached - to and detached from a cluster. - - Policy Type - A policy type is an abstraction of :term:`Policy` objects. The - implementation of a policy type specifies when the policy should be - checked and/or enforced, what profile types are supported, and what operations - are to be done before, during and after each :term:`Action`. All policy - types are provided as Senlin plugins. - - Profile - A profile is a mould used for creating objects (i.e. :term:`Node`). A - profile is an instance of a :term:`Profile Type` with all required - information specified. Each profile has a unique ID. As a guideline, a - profile cannot be updated once created. To change a profile, you have to - create a new instance. - - Profile Type - A profile type is an abstraction of objects that are backed by some - :term:`Driver`. The implementation of a profile type calls the driver(s) - to create objects that are managed by Senlin. The implementation also - serves as a factory that can "produce" objects given a profile. All profile - types are provided as Senlin plugins. - - Role - A role is a string property that can be assigned to a :term:`Node`. - Nodes in the same cluster may assume a role for certain reasons such as - application configuration. The default role for a node is empty. - - OpenStack - Open source software for building private and public clouds. - - Receiver - A receiver is an abstract resource created in the senlin engine that can - be used to hook the engine to some external event/alarm sources. A - receiver can be of different types. The most common type is a - :term:`Webhook`. - - Webhook - A webhook is an encoded URI (Uniform Resource Identifier) that can be used - for triggering some operations (e.g. Senlin actions) on some resources.
Such - a webhook URL is the only thing one needs to know to trigger an action on - a cluster. - - Worker - A worker is the thread created and managed by Senlin engine to execute - an :term:`Action` that becomes ready. When the current action completes - (with a success or failure), a worker will check the database to find - another action for execution. diff --git a/doc/source/reference/man/index.rst b/doc/source/reference/man/index.rst deleted file mode 100644 index 092d726ef..000000000 --- a/doc/source/reference/man/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -========= -Man Pages -========= - - -Senlin services -~~~~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - senlin-conductor - senlin-engine - senlin-health-manager - senlin-api - - -Senlin utilities -~~~~~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - senlin-manage - senlin-status diff --git a/doc/source/reference/man/senlin-api.rst b/doc/source/reference/man/senlin-api.rst deleted file mode 100644 index da74ed512..000000000 --- a/doc/source/reference/man/senlin-api.rst +++ /dev/null @@ -1,51 +0,0 @@ -========== -senlin-api -========== - -.. program:: senlin-api - -SYNOPSIS -~~~~~~~~ - -``senlin-api [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-api provides an external REST API to the Senlin service. - -INVENTORY -~~~~~~~~~ - -senlin-api is a WSGI application that exposes an external REST style API to -the Senlin service. senlin-api communicates with senlin-engine using Remote -Procedure Calls (RPC), which is based on AMQP protocol. - -OPTIONS -~~~~~~~ - -.. cmdoption:: --config-file - - Path to a config file to use. Multiple config files can be specified, with - values in later files taking precedence. - - -.. cmdoption:: --config-dir - - Path to a config directory to pull .conf files from. This file set is - sorted, so as to provide a predictable parse order if individual options are - over-ridden. The set is parsed after the file(s), if any, specified via - --config-file, hence over-ridden options in the directory take precedence. - -FILES -~~~~~ - -* /etc/senlin/senlin.conf -* /etc/senlin/api-paste.ini -* /etc/senlin/policy.yaml - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-conductor.rst b/doc/source/reference/man/senlin-conductor.rst deleted file mode 100644 index 177ecfd8c..000000000 --- a/doc/source/reference/man/senlin-conductor.rst +++ /dev/null @@ -1,47 +0,0 @@ -================ -senlin-conductor -================ - -.. program:: senlin-conductor - -SYNOPSIS -~~~~~~~~ - -``senlin-conductor [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-conductor provides an internal RPC interface for the senlin-api to -invoke. - -INVENTORY -~~~~~~~~~ - -The senlin-conductor provides an internal RPC interface. - -OPTIONS -~~~~~~~ -.. cmdoption:: --config-file - - Path to a config file to use. Multiple config files can be specified, with - values in later files taking precedence. - - -.. cmdoption:: --config-dir - - Path to a config directory to pull .conf files from. This file set is - sorted, so as to provide a predictable parse order if individual options are - over-ridden. The set is parsed after the file(s), if any, specified via - --config-file, hence over-ridden options in the directory take precedence. 
- -FILES -~~~~~ - -* /etc/senlin/senlin.conf - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-engine.rst b/doc/source/reference/man/senlin-engine.rst deleted file mode 100644 index 0482ae0fb..000000000 --- a/doc/source/reference/man/senlin-engine.rst +++ /dev/null @@ -1,48 +0,0 @@ -============= -senlin-engine -============= - -.. program:: senlin-engine - -SYNOPSIS -~~~~~~~~ - -``senlin-engine [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-engine is the server that performs operations on objects such as -nodes, policies and profiles. - -INVENTORY -~~~~~~~~~ - -The senlin-engine provides services to the callers so that requests on -various objects can be met by background operations. - -OPTIONS -~~~~~~~ -.. cmdoption:: --config-file - - Path to a config file to use. Multiple config files can be specified, with - values in later files taking precedence. - - -.. cmdoption:: --config-dir - - Path to a config directory to pull .conf files from. This file set is - sorted, so as to provide a predictable parse order if individual options are - over-ridden. The set is parsed after the file(s), if any, specified via - --config-file, hence over-ridden options in the directory take precedence. - -FILES -~~~~~ - -* /etc/senlin/senlin.conf - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-health-manager.rst b/doc/source/reference/man/senlin-health-manager.rst deleted file mode 100644 index 21885f5fc..000000000 --- a/doc/source/reference/man/senlin-health-manager.rst +++ /dev/null @@ -1,48 +0,0 @@ -===================== -senlin-health-manager -===================== - -.. program:: senlin-health-manager - -SYNOPSIS -~~~~~~~~ - -``senlin-health-manager [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-health-manager is the server that is responsible for cluster health -related operations. - -INVENTORY -~~~~~~~~~ - -The senlin-health-manager provides services to the callers so that various -cluster health related operations can be performed in the background. - -OPTIONS -~~~~~~~ -.. cmdoption:: --config-file - - Path to a config file to use. Multiple config files can be specified, with - values in later files taking precedence. - - -.. cmdoption:: --config-dir - - Path to a config directory to pull .conf files from. This file set is - sorted, so as to provide a predictable parse order if individual options are - over-ridden. The set is parsed after the file(s), if any, specified via - --config-file, hence over-ridden options in the directory take precedence. - -FILES -~~~~~ - -* /etc/senlin/senlin.conf - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-manage.rst b/doc/source/reference/man/senlin-manage.rst deleted file mode 100644 index bd027b37f..000000000 --- a/doc/source/reference/man/senlin-manage.rst +++ /dev/null @@ -1,98 +0,0 @@ -============= -senlin-manage -============= - -.. program:: senlin-manage - -SYNOPSIS -~~~~~~~~ - -``senlin-manage [options]`` - -DESCRIPTION -~~~~~~~~~~~ - -senlin-manage provides utilities for operators to manage Senlin-specific -maintenance operations.
- - -OPTIONS -~~~~~~~ - -To issue a senlin-manage command: - -``senlin-manage [options]`` - -Run with `-h` or `--help` to see a list of available commands: - -``senlin-manage -h`` - -Commands are `db_version`, `db_sync`, `service`, `event_purge` and -`action_purge`. Below are some detailed descriptions. - - -Senlin DB version ----------------- - -``senlin-manage db_version`` - -Print out the db schema revision. - -``senlin-manage db_sync`` - -Sync the database up to the most recent version. - - -Senlin Service Manage --------------------- - -``senlin-manage service list`` - -Print out the senlin-engine service status. - -``senlin-manage service clean`` - -Clean up dead senlin-engine services. - - -Senlin Event Manage ------------------- - -``senlin-manage event_purge -p [] -g {days,hours,minutes,seconds} age`` - -Purge the specified event records in senlin's database. - -For example, you can use this command to purge event records that are older -than three days. - -:: - - senlin-manage event_purge -p e127900ee5d94ff5aff30173aa607765 -g days 3 - - -Senlin Action Manage -------------------- - -``senlin-manage action_purge -p [] -g {days,hours,minutes,seconds} age`` - -Purge the specified action records in senlin's database. - -You can use this command to purge actions that are older than 3 days. - -:: - - senlin-manage action_purge -p e127900ee5d94ff5aff30173aa607765 -g days 3 - - -FILES -~~~~~ - -The /etc/senlin/senlin.conf file contains global options which can be -used to configure some aspects of `senlin-manage`, for example the DB -connection and logging options. - - -BUGS -~~~~ - -* Senlin issues are tracked in Launchpad so you can view or report bugs here - `OpenStack Senlin Bugs `__ diff --git a/doc/source/reference/man/senlin-status.rst b/doc/source/reference/man/senlin-status.rst deleted file mode 100644 index c18d2c684..000000000 --- a/doc/source/reference/man/senlin-status.rst +++ /dev/null @@ -1,78 +0,0 @@ -============= -senlin-status -============= - -Synopsis -======== - -:: - - senlin-status [] - -Description -=========== - -:program:`senlin-status` is a tool that provides routines for checking the -status of a Senlin deployment. - -Options -======= - -The standard pattern for executing a :program:`senlin-status` command is:: - - senlin-status [] - -Run without arguments to see a list of available command categories:: - - senlin-status - -Categories are: - -* ``upgrade`` - -Detailed descriptions are below. - -You can also run with a category argument such as ``upgrade`` to see a list of -all commands in that category:: - - senlin-status upgrade - -These sections describe the available categories and arguments for -:program:`senlin-status`. - -Upgrade -~~~~~~~ - -.. _senlin-status-checks: - -``senlin-status upgrade check`` - Performs a release-specific readiness check before restarting services with - new code. This command expects to have complete configuration and access - to databases and services. - - **Return Codes** - - .. list-table:: - :widths: 20 80 - :header-rows: 1 - - * - Return code - - Description - * - 0 - - All upgrade readiness checks passed successfully and there is nothing - to do. - * - 1 - - At least one check encountered an issue and requires further - investigation. This is considered a warning but the upgrade may be OK. - * - 2 - - There was an upgrade status check failure that needs to be - investigated. This should be considered something that stops an - upgrade. - * - 255 - - An unexpected error occurred.
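Since the check communicates its result mainly through the return code, scripts would typically branch on the exit status. A minimal sketch, assuming a deployed senlin with a complete configuration:

.. code-block:: console

   $ senlin-status upgrade check
   $ echo $?
   0

A ``0`` means it is safe to proceed with restarting services on the new code; any other value warrants investigation before upgrading.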
- - **History of Checks** - - **7.0.0 (Stein)** - - * Placeholder to be filled in with checks as they are added in Stein. diff --git a/doc/source/scenarios/affinity.rst b/doc/source/scenarios/affinity.rst deleted file mode 100644 index 05bf09169..000000000 --- a/doc/source/scenarios/affinity.rst +++ /dev/null @@ -1,119 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _ref-scenario-affinity: - -====================== -Managing Node Affinity -====================== - -When deploying multiple nodes running identical instances of the same service -(or application) for the sake of load-balancing or high-availability, it is -very likely you don't want all nodes deployed onto the same physical machine. -However, when you have a cluster with some nodes playing one role (e.g. -Application Server) and other nodes playing another role (e.g. Database), -you may want to collocate these nodes onto the same physical machine so that -inter-node communication can be faster. - -To meet these intra-cluster node collocation requirements, you have different -choices, as described below. - - -Use Server Group in Profile -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -For the purpose of managing cluster node affinity, you may choose to create -a *server group* by invoking the nova command line, e.g.: - -:: - - $ openstack server group create sg01 --policy affinity - +--------------+------+------------+---------+---------------+---------+----------+ - | Id | Name | Project Id | User Id | Policies | Members | Metadata | - +--------------+------+------------+---------+---------------+---------+----------+ - | 54a88567-... | sg01 | ... | ... | [u'affinity'] | [] | {} | - +--------------+------+------------+---------+---------------+---------+----------+ - -Then when you create a nova server profile, you can input the name of the -server group into the ``scheduler_hints`` property as shown below: - -:: - - $ cat web_cluster.yaml - type: os.nova.server - version: 1.0 - properties: - name: web_server - - <... other properties go here ...> - - scheduler_hints: - group: sg01 - -Later, when you create a cluster using this profile, the server nodes will be -booted on the same physical host if possible. In other words, the affinity -is managed directly by the nova compute service. If there are no physical -hosts satisfying the constraints, node creation requests will fail.
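If you want to verify the placement afterwards, you can inspect the server group with the openstack CLI; a minimal sketch, assuming the ``sg01`` group created above:

.. code-block:: console

   $ openstack server group show sg01

The ``members`` field of the output should list the IDs of the servers backing your cluster nodes.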
- -Use Same-Host or Different-Host in Profile -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When adding nodes to an existing cluster, the new nodes can reference a -different profile object of the same profile type (i.e. ``os.nova.server``). -If a new node is expected to be launched on the same/different host from a -set of server nodes, you can specify the constraint via ``scheduler_hints`` -as well. - -Suppose you have two server nodes in a cluster with UUIDs "UUID1" and "UUID2" -respectively; you can input the scheduling constraints in a profile as shown -below: - -:: - - $ cat standalone_server.yaml - type: os.nova.server - version: 1.0 - properties: - name: web_server - - <... other properties go here ...> - - scheduler_hints: - different_host: - - UUID1 - - UUID2 - -When adding a node that uses this profile into the cluster, the node creation -either fails (e.g. no available host found) or the node is created -successfully on a different host from the specified server nodes. - -Similarly, you can replace the ``different_host`` key above by ``same_host`` -to instruct that the new node be collocated with the specified existing -node(s). - - -Managing Affinity using Affinity Policy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Another option to manage node affinity is to use the affinity policy -(see :doc:`Affinity Policy <../user/policy_types/affinity>`). By creating and -attaching an affinity policy to a cluster, you can still control how nodes -are distributed relative to the underlying hosts. See the above link for usage -of the policy. - - -See Also -~~~~~~~~ - -* :doc:`Managing Policies <../user/policies>` -* :doc:`Builtin Policy - Affinity Policy <../user/policy_types/affinity>` diff --git a/doc/source/scenarios/autoscaling_ceilometer.rst b/doc/source/scenarios/autoscaling_ceilometer.rst deleted file mode 100644 index 9fa1676f3..000000000 --- a/doc/source/scenarios/autoscaling_ceilometer.rst +++ /dev/null @@ -1,282 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _ref-scenario-autoscaling-ceilometer: - - -================================= -Autoscaling using Ceilometer/Aodh -================================= - -As a telemetry service, the ceilometer project consists of several sub-projects -which provide metering, monitoring and alarming services in the telemetry -space. This section walks you through the steps to build an auto-scaling -solution by integrating senlin with ceilometer/aodh. - -Step 1: Create a VM cluster -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The first step is to create a profile using a spec file like the following one -and save it to a file, e.g. :file:`sample_server.yaml`: - -.. code-block:: yaml - - type: os.nova.server - version: 1.0 - properties: - name: cirros_server - flavor: m1.tiny - image: cirros-0.3.5-x86_64-disk - key_name: oskey - networks: - - network: private - -Note this spec file assumes that you have a working nova key-pair named -"``oskey``" and there is a network named "``private``". You may need to change -these values based on your environment settings. To create a profile using this -spec: - -.. code-block:: console - - $ openstack cluster profile create --spec-file sample_server.yaml pserver - -Then you can create a cluster using the profile named "``pserver``": - -.. code-block:: console - - $ openstack cluster create --profile pserver --desired-capacity 2 mycluster - -You can show cluster details using the command `openstack cluster show mycluster`: - -..
code-block:: console - - $ openstack cluster show mycluster - +------------------+--------------------------------------------------------------------------------+ - | Field | Value | - +------------------+--------------------------------------------------------------------------------+ - | config | {} | - | created_at | 2016-08-01T02:14:38Z | - | data | {} | - | dependents | {} | - | desired_capacity | 2 | - | domain_id | None | - | id | 09e9b90c-03e3-41e3-8a31-e9bde6707585 | - | init_at | 2016-08-01T02:13:59Z | - | location | None | - | max_size | -1 | - | metadata | {} | - | min_size | 0 | - | name | mycluster | - | node_ids | 78509587-fa74-49cb-984f-a2e033316a63 | - | | 8ccc31e6-14a3-4882-b0ef-27108cdb238d | - | profile_id | 8f81a3a5-e91b-4fd5-91f1-e4a04ddae20f | - | profile_name | pserver | - | project_id | e127900ee5d94ff5aff30173aa607765 | - | status | ACTIVE | - | status_reason | CLUSTER_CREATE: number of active nodes is equal or above desired_capacity (2). | - | timeout | 3600 | - | updated_at | 2016-08-01T02:14:38Z | - | user_id | 3914a2df5b7e49e3acbba86044e820ef | - +------------------+--------------------------------------------------------------------------------+ - - -This creates a cluster with 2 nodes at the beginning. We export the -cluster ID into an environment variable for convenience: - -.. code-block:: console - - $ export MYCLUSTER_ID=10c80bfe-41af-41f7-b9b1-9c81c9e5d21f - -You may want to check the IP addresses assigned to each node. In the output -from the following command, you will find the IP address for the specific node: - -.. code-block:: console - - $ openstack cluster node show 14936837-1459-416b-a1f3-dea026f6cffc --details - ... - | details | +-----------+--------------------------------------+ | - | | | property | value | | - | | +-----------+--------------------------------------+ | - | | | addresses | { | | - | | | | "private": [ | | - | | | | { | | - | | | | "OS-EXT-IPS-MAC:mac-addr": ... | | - | | | | "OS-EXT-IPS:type": "fixed", | | - | | | | "addr": "10.0.0.9", | | - | | | | "version": 4 | | - | | | | } | | - | | | | ] | | - | | | | } | | - | | | flavor | 1 | | - | | | id | 362f57b2-c089-4aab-bab3-1a7ffd4e1834 | | - ... - -We will use these IP addresses later to generate workloads on each nova -server. - -Step 2: Create Receivers -~~~~~~~~~~~~~~~~~~~~~~~~ - -The next step is to create receivers for triggering actions on the cluster. -Each receiver is usually created for a specific purpose, so for -different purposes you may need to create more than one receiver. - -The following command creates a receiver for scaling out the specified cluster -by two nodes every time it is triggered: - -..
code-block:: console - - $ openstack cluster receiver create --action CLUSTER_SCALE_OUT --params count=2 --cluster mycluster r_01 - +------------+---------------------------------------------------------------------------------+ - | Field | Value | - +------------+---------------------------------------------------------------------------------+ - | action | CLUSTER_SCALE_OUT | - | actor | { | - | | "trust_id": "432f81d339444cac959bab2fd9ba92fa" | - | | } | - | channel | { | - | | "alarm_url": "http://node1:8777/v1/webhooks/ba...5a/trigger?V=2&count=2 | - | | } | - | cluster_id | b75d25e7-e84d-4742-abf7-d8a3001e25a9 | - | created_at | 2016-08-01T02:17:14Z | - | domain_id | None | - | id | ba13f7cd-7a95-4545-b646-6a833ba6505a | - | location | None | - | name | r_01 | - | params | { | - | | "count": "2" | - | | } | - | project_id | 99185bcde62c478e8d05b702e52d8b8d | - | type | webhook | - | updated_at | None | - | user_id | 6c369aec78b74a4da413f86dadb0255e | - +------------+---------------------------------------------------------------------------------+ - -At present, all property values shown for a receiver are read-only. You cannot -change their values once the receiver is created. The only type of receiver -senlin understands is "``webhook``". For the "``action``" parameter, there are -many choices: - -- ``CLUSTER_SCALE_OUT`` -- ``CLUSTER_SCALE_IN`` -- ``CLUSTER_RESIZE`` -- ``CLUSTER_CHECK`` -- ``CLUSTER_UPDATE`` -- ``CLUSTER_DELETE`` -- ``CLUSTER_ADD_NODES`` -- ``CLUSTER_DEL_NODES`` -- ``NODE_CREATE`` -- ``NODE_DELETE`` -- ``NODE_UPDATE`` -- ``NODE_CHECK`` -- ``NODE_RECOVER`` - -Senlin may add support for more action types in the future. - -After a receiver is created, you can check its "``channel``" property value to -find out how to trigger that receiver. For a receiver of type "``webhook``" -(the default and the only supported type as of now), this means you will -check the "``alarm_url``" value. We will use that value later for action -triggering. For convenience, we export that value to an environment variable: - -.. code-block:: console - - $ export ALRM_URL01="http://node1:8777/v1/webhooks/ba...5a/trigger?V=2&count=2" - -Similar to the example above, you can create other receivers for different -kinds of cluster operations or the same cluster operation with different -parameter values. - -Step 3: Creating Aodh Alarms -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once we have the cluster created and prepared to receive external signals, we -can proceed to create alarms using the software/service you deployed. The -following command creates a threshold alarm using the aodh alarm service so that: - -- aodh will evaluate the CPU utilization (i.e. ``cpu_util``) metric across the - specified cluster; -- aodh will compute the CPU utilization using the average value during a given - period (i.e. 60 seconds here); -- aodh will perform evaluation at the end of every single period; -- aodh won't trigger alarm actions repeatedly; -- aodh will do metric aggregation based on the specified metadata. - -.. code-block:: console - - $ aodh alarm create \ - --type gnocchi_resources_threshold --name cpu-high \ - --metric cpu_util --threshold 70 --comparison-operator gt \ - --description 'instance running hot' --evaluation-periods 1 \ - --aggregation-method mean --alarm-action $ALRM_URL01 \ - --granularity 600 --repeat-actions False \ - --query metadata.user_metadata.cluster_id=$MYCLUSTER_ID - -Note that we are referencing the two environment variables ``MYCLUSTER_ID`` -and ``ALRM_URL01`` in this command.
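To close the loop, you would typically pair the scale-out alarm with a scale-in counterpart. The following is a minimal sketch under the assumption that a second receiver ``r_02`` has been created for the ``CLUSTER_SCALE_IN`` action and its ``alarm_url`` exported as ``ALRM_URL02``; the ``15`` threshold and ``lt`` operator are illustrative values, with all other flags mirroring the ``cpu-high`` alarm above:

.. code-block:: console

   $ aodh alarm create \
     --type gnocchi_resources_threshold --name cpu-low \
     --metric cpu_util --threshold 15 --comparison-operator lt \
     --description 'instance running idle' --evaluation-periods 1 \
     --aggregation-method mean --alarm-action $ALRM_URL02 \
     --granularity 600 --repeat-actions False \
     --query metadata.user_metadata.cluster_id=$MYCLUSTER_ID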
.. note:: - To make aodh aware of the ``cluster_id`` metadata senlin injects into each and every VM server created, you may need to add the following line into your :file:`/etc/ceilometer/ceilometer.conf` file:: - - reserved_metadata_keys = cluster_id - - Also note that to make sure your CPU utilization driven metrics are evaluated at least once per 60 seconds, you will need to change the ``interval`` value for the ``cpu_source`` in the file :file:`/etc/ceilometer/pipeline.yaml`. For example, you can change it from the default value ``600`` to ``60``:: - - sources: - - - name: cpu_source - interval: 600 <- change this to 60 - meters: - - "cpu" - - -Step 4: Run Workloads on Cluster Nodes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To examine the effect of cluster scaling under high CPU workload, you can now log into each cluster node and run a CPU burning workload there to drive the CPU utilization high. For example: - -.. code-block:: console - - $ ssh cirros@10.0.0.9 - $ cat /dev/zero > /dev/null - < Guest system "hang" here... > - -When all nodes in the cluster have their CPU pressure boosted, you can check the CPU utilization on each node and finally proceed to the next step. - -Step 5: Verify Cluster Scaling -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A while after the CPU workloads on the cluster nodes are started, you will notice that the cluster has been automatically scaled: two new nodes are created and added to the cluster. This can be verified by running the following command: - -.. code-block:: console - - $ openstack cluster show $MYCLUSTER_ID - -Optionally, you can use the following command to check if the anticipated action was triggered and executed: - -.. code-block:: console - - $ openstack cluster action list --filters target=$MYCLUSTER_ID diff --git a/doc/source/scenarios/autoscaling_heat.rst b/doc/source/scenarios/autoscaling_heat.rst deleted file mode 100644 index 05099675b..000000000 --- a/doc/source/scenarios/autoscaling_heat.rst +++ /dev/null @@ -1,251 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -.. _guide-tutorial-autoscaling-heat: - -===================== -Autoscaling with Heat -===================== - -Goal -~~~~ - -There are Senlin resource types in Heat which make the deployment of a full-featured auto-scaling solution easily attainable. This document provides a tutorial for users who want to use heat to create a senlin cluster. - -Real deployments often require the cluster to be load-balanced and auto-scaled. We also want the scaling actions triggered based on business data instead of infrastructure metrics. When the existing cluster cannot sustain the throughput/workload, the cluster will be scaled out; when the throughput/workload is low, the cluster will be scaled in. - -Moreover, customization is easy when auto-scaling. Receivers can be created to generate webhooks for the scale_out and scale_in actions.
Moreover, placement_zone.yaml and placement_region.yaml can be attached to the cluster to guide which zone/region new nodes are placed in on scale_out; deletion_policy can be attached to the cluster to guide the choice of candidates to delete on scale_in. - -Sample template -~~~~~~~~~~~~~~~ - -There is a sample template in the heat-templates project, under the senlin directory, for creating an elastic, load-balanced Senlin cluster with Heat. Here we pick some important parts of the sample and explain them one by one. - -The resource below defines a security_group for connections to the created load-balanced cluster: - -.. code-block:: yaml - - security_group: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - port_range_min: 80 - port_range_max: 80 - -The resource below defines the profile used to create the targeted cluster: - -.. code-block:: yaml - - profile: - type: OS::Senlin::Profile - properties: - type: os.nova.server-1.0 - properties: - flavor: {get_param: flavor} - image: {get_param: image} - key_name: {get_param: key_name} - networks: - - network: {get_param: network} - security_groups: - - {get_resource: security_group} - -The resource below creates a Senlin cluster with at least two nodes: - -.. code-block:: yaml - - cluster: - type: OS::Senlin::Cluster - properties: - desired_capacity: 2 - min_size: 2 - profile: {get_resource: profile} - -The two resources below define the scale_in_policy and scale_out_policy attached to the created cluster. The event property defines which action the policy applies to. When the adjustment type is set to CHANGE_IN_CAPACITY, the cluster will increase the number of nodes on scale_out or decrease the number of nodes on scale_in: - -.. code-block:: yaml - - scale_in_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.scaling-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - event: CLUSTER_SCALE_IN - adjustment: - type: CHANGE_IN_CAPACITY - number: 1 - - scale_out_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.scaling-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - event: CLUSTER_SCALE_OUT - adjustment: - type: CHANGE_IN_CAPACITY - number: 1 - -The resource below defines a lb_policy to be attached to the target cluster. Once the policy is attached to the cluster, Senlin will automatically create the loadbalancer, pool, and health_monitor by invoking neutron LBaaS v2 APIs for load-balancing purposes: - -.. code-block:: yaml - - lb_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.loadbalance-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - pool: - protocol: HTTP - protocol_port: 80 - subnet: {get_param: pool_subnet} - lb_method: ROUND_ROBIN - vip: - subnet: {get_param: vip_subnet} - protocol: HTTP - protocol_port: 80 - health_monitor: - type: HTTP - delay: 10 - timeout: 5 - max_retries: 4
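- -After the stack is deployed (see the deployment steps below), you can verify that this policy got attached by listing the policy bindings. A quick sketch, where ``<cluster-id>`` is a placeholder for the ID of the cluster created by the stack: - -.. code-block:: console - - $ openstack cluster policy binding list <cluster-id> -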
The two resources below define the receivers to be triggered when a certain alarm or event occurs: - -.. code-block:: yaml - - receiver_scale_out: - type: OS::Senlin::Receiver - properties: - cluster: {get_resource: cluster} - action: CLUSTER_SCALE_OUT - type: webhook - - receiver_scale_in: - type: OS::Senlin::Receiver - properties: - cluster: {get_resource: cluster} - action: CLUSTER_SCALE_IN - type: webhook - -The resource below defines the policy for selecting candidate nodes for deletion when the cluster is to be shrunk: - -.. code-block:: yaml - - deletion_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.deletion-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - criteria: YOUNGEST_FIRST - destroy_after_deletion: True - grace_period: 20 - reduce_desired_capacity: False - -The two resources below define the alarms to trigger the above two receivers respectively. We use the average rate of incoming bytes at the LoadBalancer as the metric to trigger the scaling operations: - -.. code-block:: yaml - - scale_in_alarm: - type: OS::Ceilometer::Alarm - properties: - description: trigger when bandwidth overflow - meter_name: network.services.lb.incoming.bytes.rate - statistic: avg - period: 180 - evaluation_periods: 1 - threshold: 12000 - repeat_actions: True - alarm_actions: - - {get_attr: [receiver_scale_in, channel, alarm_url]} - comparison_operator: le - query: - metadata.user_metadata.cluster_id: {get_resource: cluster} - - scale_out_alarm: - type: OS::Ceilometer::Alarm - properties: - description: trigger when bandwidth insufficient - meter_name: network.services.lb.incoming.bytes.rate - statistic: avg - period: 60 - evaluation_periods: 1 - threshold: 28000 - repeat_actions: True - alarm_actions: - - {get_attr: [receiver_scale_out, channel, alarm_url]} - comparison_operator: ge - query: - metadata.user_metadata.cluster_id: {get_resource: cluster} - -Deployment Steps -~~~~~~~~~~~~~~~~ - -Before the deployment, please ensure that neutron LBaaS v2 and ceilometer/Aodh have been installed and configured in your environment. - -Step one is to generate a key pair using the following command: - -.. code-block:: console - - $ openstack keypair create heat_key - -Step two is to create a heat template by downloading the template file from `heat template`_. - -Step three is to create a heat stack using the following command: - -.. code-block:: console - - $ openstack stack create test -t ./ex_aslb.yaml --parameter "key_name=heat_key" - -The steps and samples introduced in this tutorial can also work well together with the combination of ceilometer, Aodh, and Gnocchi without any change. - -.. _heat template: https://opendev.org/openstack/senlin/src/branch/master/doc/source/scenarios/ex_lbas.yaml diff --git a/doc/source/scenarios/autoscaling_overview.rst b/doc/source/scenarios/autoscaling_overview.rst deleted file mode 100644 index 516f5177d..000000000 --- a/doc/source/scenarios/autoscaling_overview.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. 
_scenario-affinity: - -====================== -Support to AutoScaling -====================== - -The senlin service provides a rich set of facilities for building an -auto-scaling solution: - -- *Operations*: The ``CLUSTER_SCALE_OUT``, ``CLUSTER_SCALE_IN`` operations are - the simplest form of commands to scale a cluster. The ``CLUSTER_RESIZE`` - operation, on the other hand, provides more options for controlling the - detailed cluster scaling behavior. These operations can be performed with - and without policies attached to a cluster. - -- *Policies*: - - The ``senlin.policy.scaling`` (:doc:`link <../user/policy_types/scaling>`) - policy can be applied to fine tune the cluster scaling operations. - - The ``senlin.policy.deletion`` (:doc:`link <../user/policy_types/deletion>`) - policy can be attached to a cluster to control how nodes are removed from a - cluster. - - The ``senlin.policy.affinity`` (:doc:`link <../user/policy_types/affinity>`) - policy can be used to control how node affinity or anti-affinity can be - enforced. - - The ``senlin.policy.region_placement`` - (:doc:`link <../user/policy_types/region_placement>`) can be applied to - scale a cluster across multiple regions. - - The ``senlin.policy.zone_placement`` - (:doc:`link <../user/policy_types/zone_placement>`) can be enforced to - achieve a cross-availability-zone node distribution. - -- *Receivers*: The receiver (:doc:`link <../user/receivers>`) concept provides a - channel to which you can send signals or alarms from an external monitoring - software or service so that scaling operations can be automated. - -This section provides some guides on integrating senlin with other services -so that cluster scaling can be automated. diff --git a/doc/source/scenarios/ex_lbas.yaml b/doc/source/scenarios/ex_lbas.yaml deleted file mode 100644 index f8ca6dd5f..000000000 --- a/doc/source/scenarios/ex_lbas.yaml +++ /dev/null @@ -1,167 +0,0 @@ -heat_template_version: 2016-04-08 - -description: > - This template demonstrate how to create a cluster and attach a - loadbalance policy, a scale-out policy and scale-in policy to it. - -parameters: - flavor: - description: Flavor for the instances to be created. - type: string - default: m1.nano - image: - description: Name or ID of the image to use for the instances. - type: string - default: cirros-0.3.5-x86_64-disk - key_name: - description: Name of an existing key pair to use for the instances. - type: string - network: - description: The network for the instances. - type: string - default: private - pool_subnet: - description: Subnet for the port on which members can be connected. - type: string - default: private-subnet - vip_subnet: - description: Subnet on which VIP address will be allocated. 
- type: string - default: private-subnet - -resources: - security_group: - type: OS::Neutron::SecurityGroup - properties: - rules: - - protocol: icmp - - protocol: tcp - port_range_min: 22 - port_range_max: 22 - - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - profile: - type: OS::Senlin::Profile - properties: - type: os.nova.server-1.0 - properties: - flavor: {get_param: flavor} - image: {get_param: image} - key_name: {get_param: key_name} - networks: - - network: {get_param: network} - security_groups: - - {get_resource: security_group} - - cluster: - type: OS::Senlin::Cluster - properties: - desired_capacity: 2 - min_size: 2 - profile: {get_resource: profile} - - scale_in_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.scaling-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - event: CLUSTER_SCALE_IN - adjustment: - type: CHANGE_IN_CAPACITY - number: 1 - - scale_out_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.scaling-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - event: CLUSTER_SCALE_OUT - adjustment: - type: CHANGE_IN_CAPACITY - number: 1 - - lb_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.loadbalance-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - pool: - protocol: HTTP - protocol_port: 80 - subnet: {get_param: pool_subnet} - lb_method: ROUND_ROBIN - vip: - subnet: {get_param: vip_subnet} - protocol: HTTP - protocol_port: 80 - health_monitor: - type: HTTP - delay: 10 - timeout: 5 - max_retries: 4 - - receiver_scale_out: - type: OS::Senlin::Receiver - properties: - cluster: {get_resource: cluster} - action: CLUSTER_SCALE_OUT - type: webhook - - receiver_scale_in: - type: OS::Senlin::Receiver - properties: - cluster: {get_resource: cluster} - action: CLUSTER_SCALE_IN - type: webhook - - deletion_policy: - type: OS::Senlin::Policy - properties: - type: senlin.policy.deletion-1.0 - bindings: - - cluster: {get_resource: cluster} - properties: - criteria: YOUNGEST_FIRST - destroy_after_deletion: True - grace_period: 20 - reduce_desired_capacity: False - - scale_in_alarm: - type: OS::Ceilometer::Alarm - properties: - description: trigger when bandwidth overflow - meter_name: network.services.lb.incoming.bytes.rate - statistic: avg - period: 180 - evaluation_periods: 1 - threshold: 12000 - repeat_actions: True - alarm_actions: - - {get_attr: [receiver_scale_in, channel, alarm_url]} - comparison_operator: le - query: - metadata.user_metadata.cluster_id: {get_resource: cluster} - - scale_out_alarm: - type: OS::Ceilometer::Alarm - properties: - description: trigger when bandwidth insufficient - meter_name: network.services.lb.incoming.bytes.rate - statistic: avg - period: 60 - evaluation_periods: 1 - threshold: 28000 - repeat_actions: True - alarm_actions: - - {get_attr: [receiver_scale_out, channel, alarm_url]} - comparison_operator: ge - query: - metadata.user_metadata.cluster_id: {get_resource: cluster} diff --git a/doc/source/tutorial/autoscaling.rst b/doc/source/tutorial/autoscaling.rst deleted file mode 100644 index cfeff9b17..000000000 --- a/doc/source/tutorial/autoscaling.rst +++ /dev/null @@ -1,172 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial-autoscaling: - -=========================== -Making Your Cluster Elastic -=========================== - -Creating Receivers -~~~~~~~~~~~~~~~~~~ - -Suppose you want a cluster to scale out by one node each time an event occurs. You can create a receiver for this task: - -.. code-block:: console - - $ openstack cluster receiver create --type webhook --cluster mycluster \ - --action CLUSTER_SCALE_OUT so_receiver_1 - +------------+---------------------------------------------------+ - | Field | Value | - +------------+---------------------------------------------------+ - | action | CLUSTER_SCALE_OUT | - | actor | { | - | | "trust_id": "b2b8fd71c3d54f67ac14e5851c0117b8" | - | | } | - | channel | { | - | | "alarm_url": "" | - | | } | - | cluster_id | 30d7ef94-114f-4163-9120-412b78ba38bb | - | created_at | 2017-02-08T02:08:13Z | - | domain_id | None | - | id | 5722a2b0-1f5f-4a82-9c08-27da9982d46f | - | location | None | - | name | so_receiver_1 | - | params | {} | - | project_id | 36d551c0594b4cc99d1bbff8bf202ec3 | - | type | webhook | - | updated_at | None | - | user_id | 9563fa29642a4efdb1033bf8aab07daa | - +------------+---------------------------------------------------+ - - -The command above creates a receiver named ``so_receiver_1`` which can be used to initiate a ``CLUSTER_SCALE_OUT`` action on the cluster ``mycluster``. From the output of this command, you will find an ``alarm_url`` value from the ``channel`` property. This will be the URL for you to trigger the scaling operation. - -.. note:: - - You are expected to treat the ``alarm_url`` value as a secret. Any person or software which knows this value will be able to trigger the scaling operation on your cluster. This may not be what you wanted. - -The default type of receiver is "``webhook``". You may choose to create a "``message``" type of receiver if you have the zaqar messaging service installed. For more details, please refer to :ref:`ref-receivers`. - -Triggering Scaling -~~~~~~~~~~~~~~~~~~ - -Once you have received a channel from the created receiver, you can use it to trigger the associated action on the specified cluster. The simplest way to do this is to use the :command:`curl` command as shown below: - -.. code-block:: console - - $ curl -X POST <alarm_url> - -Once the above request is received by the senlin-api, your cluster will be scaled out by one node. In other words, a new node is created and added to the cluster. - - -Creating Scaling Policies -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Senlin provides some builtin policy types to control how a cluster will be scaled when a relevant request is received. A scaling request can be a simple ``CLUSTER_SCALE_OUT`` or ``CLUSTER_SCALE_IN`` action which can accept an optional ``count`` argument; it can be a more complex ``CLUSTER_RESIZE`` action which can accept more arguments for fine-tuning the scaling behavior. - -In the absence of such arguments (which is not uncommon if you are using third-party monitoring software which doesn't have the intelligence to decide each and every argument), you can always use scaling policies for this purpose.
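- -For comparison, a fully specified scaling request needs no policy at all. A minimal sketch, reusing the ``mycluster`` name from earlier (the ``--adjustment`` option of the resize command is described in :ref:`ref-clusters`): - -.. code-block:: console - - $ openstack cluster resize --adjustment -2 mycluster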
- -Below is a sample YAML file (:file:`examples/policies/scaling_policy.yaml`) used for creating a scaling policy object:: - - type: senlin.policy.scaling - version: 1.0 - properties: - event: CLUSTER_SCALE_IN - adjustment: - type: CHANGE_IN_CAPACITY - number: 1 - min_step: 1 - best_effort: True - cooldown: 120 - -To create a policy object, you can use the following command: - -.. code-block:: console - - $ openstack cluster policy create \ - --spec-file examples/policies/scaling_policy.yaml \ - policy1 - +------------+--------------------------------------+ - | Field | Value | - +------------+--------------------------------------+ - | created_at | 2016-12-08T02:41:30.000000 | - | data | {} | - | domain_id | None | - | id | 3ca962c5-68ce-4293-9087-c73964546223 | - | location | None | - | name | policy1 | - | project_id | 36d551c0594b4cc99d1bbff8bf202ec3 | - | spec | { | - | | "version": 1.0, | - | | "type": "senlin.policy.scaling", | - | | "properties": { | - | | "adjustment": { | - | | "min_step": 1, | - | | "cooldown": 120, | - | | "best_effort": true, | - | | "number": 1, | - | | "type": "CHANGE_IN_CAPACITY" | - | | }, | - | | "event": "CLUSTER_SCALE_IN" | - | | } | - | | } | - | type | senlin.policy.scaling-1.0 | - | updated_at | None | - | user_id | 9563fa29642a4efdb1033bf8aab07daa | - +------------+--------------------------------------+ - -The next step to enforce this policy on your cluster is to attach the policy to it, as shown below: - -.. code-block:: console - - $ openstack cluster policy attach --policy policy1 mycluster - Request accepted by action: 89626141-0999-4e76-9795-a86c4cfd531f - - $ openstack cluster policy binding list mycluster - +-----------+-------------+---------------------------+------------+ - | policy_id | policy_name | policy_type | is_enabled | - +-----------+-------------+---------------------------+------------+ - | 3ca962c5 | policy1 | senlin.policy.scaling-1.0 | True | - +-----------+-------------+---------------------------+------------+ - -In the future, when your cluster is about to be scaled in (no matter whether the request comes from a user, a piece of software, or a receiver), the attached scaling policy will help determine 1) how many nodes to remove, 2) whether the scaling operation should be done on a best-effort basis, and 3) for how long the cluster will not respond to further scaling requests. - -For more information on using scaling policies, you can refer to :ref:`ref-scaling-policy`. diff --git a/doc/source/tutorial/basics.rst b/doc/source/tutorial/basics.rst deleted file mode 100644 index b60882cb4..000000000 --- a/doc/source/tutorial/basics.rst +++ /dev/null @@ -1,179 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial-basic: - -============= -Senlin Basics -============= - -.. note:: - - This tutorial assumes that you are working on the master branch of the - senlin source code which contains the latest profile samples and policy - samples. To clone the latest code base: - - .. 
code-block:: console - - $ git clone https://git.openstack.org/openstack/senlin.git - -Follow the `Installation Guide`_ to install the senlin service. - - -Creating Your First Profile -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A profile captures the necessary elements you need to create a node. The -following is a profile specification (``spec`` for short) that can be used -to create a nova server: - -.. literalinclude:: ../../../examples/profiles/nova_server/cirros_basic.yaml - :language: yaml - -.. note:: - The above source file can be found in senlin source tree at - ``/examples/profiles/nova_server/cirros_basic.yaml``. - -The **spec** assumes that: - -- you have a nova keypair named ``oskey``, and -- you have a neutron network named ``private``, and -- there is a glance image named ``cirros-0.3.5-x86_64-disk`` - -You may have to change the values based on your environment setup before using -this file to create a profile. After the **spec** file is modified properly, -you can use the following command to create a profile object: - -.. code-block:: console - - $ cd $SENLIN_ROOT/examples/profiles/nova_server - $ openstack cluster profile create --spec-file cirros_basic.yaml myserver - -Check the :doc:`Profiles <../user/profiles>` section in the -:ref:`user-references` documentation for more details. - -Creating Your First Cluster -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -With a profile created, we can proceed to create a cluster by specifying the -profile and a cluster name. - -.. code-block:: console - - $ openstack cluster create --profile myserver mycluster - -If you don't explicitly specify a number as the desired capacity of the -cluster, senlin won't create nodes in the cluster. That means the newly -created cluster is empty. If you do provide a number as the desired capacity -for the cluster as shown below, senlin will create the specified number of -nodes in the cluster. - -.. code-block:: console - - $ openstack cluster create --profile myserver --desired-capacity 1 mycluster - $ openstack cluster show mycluster - -For more details, check the :doc:`Creating a Cluster <../user/clusters>` -section in the :ref:`user-references` documentation. - - -Scaling a Cluster -~~~~~~~~~~~~~~~~~ - -Now you can try to change the size of your cluster. To increase the size, -use the following command: - -.. code-block:: console - - $ openstack cluster expand mycluster - $ openstack cluster show mycluster - -To decrease the size of the cluster, use the following command: - -.. code-block:: console - - $ openstack cluster shrink mycluster - $ openstack cluster show mycluster - -For more details, please check the :doc:`Resizing a Cluster <../user/clusters>` -section in the :ref:`user-references` section. - - -Resizing a Cluster -~~~~~~~~~~~~~~~~~~ - -Yet another way to change the size of a cluster is to use the command -``cluster-resize``: - -.. code-block:: console - - $ openstack cluster resize --capacity 2 mycluster - $ openstack cluster show mycluster - -The ``cluster-resize`` command supports more flexible options to control how -a cluster is resized. For more details, please check the -:doc:`Resizing a Cluster <../user/clusters>` section in the -:ref:`user-references` section. - - -Creating a Node ---------------- - -Another way to manage cluster node membership is to create a standalone node -then add it to a cluster. To create a node using a given profile: - -.. 
code-block:: console - - $ openstack cluster node create --profile myserver newnode - $ openstack cluster node show newnode - -For other options supported by the ``node-create`` command, please check the :doc:`Creating a Node <../user/nodes>` subsection in the :ref:`user-references` documentation. - - -Adding a Node to a Cluster --------------------------- - -If a node has the same profile type as that of a cluster, you can add the node to the cluster using the ``cluster-node-add`` command: - -.. code-block:: console - - $ openstack cluster members add --nodes newnode mycluster - $ openstack cluster members list mycluster - $ openstack cluster show mycluster - $ openstack cluster node show newnode - -After the operation is completed, you will see that the node becomes a member of the target cluster, with an index value assigned. - -Removing a Node from a Cluster ------------------------------- - -You can also remove a node from a cluster using the ``cluster-node-del`` command: - -.. code-block:: console - - $ openstack cluster members del --nodes newnode mycluster - $ openstack cluster members list mycluster - $ openstack cluster show mycluster - $ openstack cluster node show newnode - -For other cluster membership management commands and options, please check the :doc:`Cluster Membership <../user/membership>` section in the :ref:`user-references` section. - - -.. _Installation Guide: https://docs.openstack.org/senlin/latest/install diff --git a/doc/source/tutorial/policies.rst b/doc/source/tutorial/policies.rst deleted file mode 100644 index 5dd961418..000000000 --- a/doc/source/tutorial/policies.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial-policies: - -===================== -Working with Policies -===================== - -Creating a Policy -~~~~~~~~~~~~~~~~~ - -A policy contains the set of rules that are checked/enforced before or after certain cluster operations are performed. The detailed specification of a specific policy type is provided as the ``spec`` of a policy object when it is created. The following is a sample ``spec`` for a deletion policy: - -.. literalinclude:: ../../../examples/policies/deletion_policy.yaml - :language: yaml - -.. note:: - The above source file can be found in senlin source tree at - ``/examples/policies/deletion_policy.yaml``. - -To create a policy object using this specification (``spec`` for short): - -.. code-block:: console - - $ cd $SENLIN_ROOT/examples/policies - $ openstack cluster policy create --spec-file deletion_policy.yaml dp01 - -To verify the policy creation, you can do: - -.. code-block:: console - - $ openstack cluster policy list - $ openstack cluster policy show dp01 - -Attaching a Policy -~~~~~~~~~~~~~~~~~~ - -To enforce a policy on a cluster, attach the policy to it: - -.. code-block:: console - - $ openstack cluster policy attach --policy dp01 mycluster - -To verify the policy attach operation, do the following: - -.. 
code-block:: console - - $ openstack cluster policy binding list mycluster - $ openstack cluster policy binding show --policy dp01 mycluster - -Verifying a Policy -~~~~~~~~~~~~~~~~~~ - -To verify the deletion policy attached to the cluster ``mycluster``, you -can try expanding the cluster, followed by shrinking it: - -.. code-block:: console - - $ openstack cluster members list mycluster - $ openstack cluster expand mycluster - $ openstack cluster members list mycluster - $ openstack cluster shrink mycluster - $ openstack cluster members list mycluster - -After the scale-in operation is completed, you will find that the oldest -node from the cluster is removed. If you want to remove the youngest node -instead, you can create a different deletion policy with a different -specification. - -For more details about policy types and policy management, check the -:doc:`Policy Types <../user/policy_types>` section and the -:doc:`Policies <../user/policies>` section in the -:ref:`user-references` documentation respectively. -You may also want to check the -:doc:`Cluster-Policy Bindings <../user/bindings>` section in the -:ref:`user-references` section for more details on managing the cluster-policy -relationship. diff --git a/doc/source/tutorial/receivers.rst b/doc/source/tutorial/receivers.rst deleted file mode 100644 index 5db2050eb..000000000 --- a/doc/source/tutorial/receivers.rst +++ /dev/null @@ -1,88 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _tutorial-receivers: - -====================== -Working with Receivers -====================== - -Receivers are the event sinks associated to senlin clusters. When -certain events (or alarms) are seen by a monitoring software, the software can -notify the senlin clusters of those events (or alarms). When senlin receives -those notifications, it can automatically trigger some predefined operations -with preset parameter values. - -Creating a Receiver -~~~~~~~~~~~~~~~~~~~ - -To create a receiver, you need to specify the target cluster and the target -action to be triggered in future. For example, the following command creates -a receiver that will trigger the ``CLUSTER_SCALE_IN`` operation on the target -cluster: - -.. code-block:: console - - $ openstack cluster receiver create --cluster mycluster --action CLUSTER_SCALE_IN w_scale_in - -The output from the command will be something like this: - -.. 
code-block:: console - - $ openstack cluster receiver create --cluster mycluster --action CLUSTER_SCALE_IN w_scale_in - +------------+-------------------------------------------------------------------------+ - | Field | Value | - +------------+-------------------------------------------------------------------------+ - | action | CLUSTER_SCALE_IN | - | actor | { | - | | "trust_id": "1bc958f5780b4ad38fb6583701a9f39b" | - | | } | - | channel | { | - | | "alarm_url": "http://node1:8777/v1/webhooks/5dacde18-.../trigger?V=2" | - | | } | - | cluster_id | 7fb3d988-3bc1-4539-bd5d-3f72e8d6e0c7 | - | created_at | 2016-05-23T01:36:39 | - | domain_id | None | - | id | 5dacde18-661e-4db4-b7a8-f2a6e3466f98 | - | location | None | - | name | w_scale_in | - | params | None | - | project_id | eee0b7c083e84501bdd50fb269d2a10e | - | type | webhook | - | updated_at | None | - | user_id | ab79b9647d074e46ac223a8fa297b846 | - +------------+-------------------------------------------------------------------------+ - -From the output of the ``openstack cluster receiver create`` command, you can see: - -- There is a ``type`` property whose value is set to ``webhook`` by default, which is one of the receiver types senlin supports. -- There is a ``channel`` property which contains an ``alarm_url`` key. The value of the ``alarm_url`` is the endpoint for you to post a request. - -Triggering a Receiver with CURL -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once you have a receiver created, you can test it by triggering the specified action using tools like ``curl``. - -.. code-block:: console - - $ curl -X POST http://node1:8777/v1/webhooks/5dacde18-661e-4db4-b7a8-f2a6e3466f98/trigger?V=2 - -After a while, you can check that the cluster has been shrunk by 1 node. - -For more details about managing receivers, please check the :doc:`Receivers <../user/receivers>` section in the :ref:`user-references` documentation. diff --git a/doc/source/user/actions.rst b/doc/source/user/actions.rst deleted file mode 100644 index 1bde34742..000000000 --- a/doc/source/user/actions.rst +++ /dev/null @@ -1,184 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -.. _ref-actions: - - -======= -Actions -======= - -Concept -~~~~~~~ - -An :term:`Action` is an operation that can be performed on a :term:`Cluster` or a :term:`Node`. Each action is executed asynchronously by a worker thread after being created. Most Senlin APIs are executed asynchronously inside the Senlin engine except for some object retrieval or object listing APIs. - -Different types of objects support different sets of actions. 
For example, a cluster object supports the following actions: - -* ``CREATE``: creates a cluster; -* ``DELETE``: deletes a cluster; -* ``UPDATE``: updates the properties and/or the profile used by a cluster; -* ``ADD_NODES``: adds existing nodes to a cluster; -* ``DEL_NODES``: removes nodes from a cluster; -* ``ATTACH_POLICY``: attaches the specified policy to a cluster; -* ``DETACH_POLICY``: detaches the specified policy from a cluster; -* ``UPDATE_POLICY``: updates the specified policy on a cluster; -* ``SCALE_IN``: shrinks the size of a cluster; -* ``SCALE_OUT``: inflates the size of a cluster; -* ``RESIZE``: resizes a cluster; -* ``CHECK``: checks a cluster; -* ``RECOVER``: recovers a cluster; -* ``REPLACE_NODES``: replaces the nodes in a cluster with the specified nodes; -* ``OPERATION``: performs an operation on the specified cluster. - -A node object supports the following actions: - -* ``CREATE``: creates a node; -* ``DELETE``: deletes a node; -* ``UPDATE``: updates the properties and/or the profile used by a node; -* ``CHECK``: checks a node; -* ``RECOVER``: recovers a node; -* ``OPERATION``: performs an operation on the specified node. - -In the future, Senlin may support user-defined actions (UDAs). - - -Listing Actions -~~~~~~~~~~~~~~~ - -The following command shows the actions known by the Senlin engine:: - - $ openstack cluster action list - +----------+-------------------------+----------------+-----------+----------+------------+-------------+----------------------+ - | id | name | action | status | target_id| depends_on | depended_by | created_at | - +----------+-------------------------+----------------+-----------+----------+------------+-------------+----------------------+ - | 1189f5e8 | node_create_b825fb74 | NODE_CREATE | SUCCEEDED | b825fb74 | | | 2016-09-22T10:13:24Z | - | 2454c28a | node_delete_c035c519 | NODE_DELETE | SUCCEEDED | c035c519 | | | 2016-09-22T10:53:09Z | - | 252b9491 | node_create_c035c519 | NODE_CREATE | SUCCEEDED | c035c519 | | | 2016-09-22T10:54:09Z | - | 34802f3b | cluster_create_7f37e191 | CLUSTER_CREATE | SUCCEEDED | 7f37e191 | | | 2016-09-22T11:04:00Z | - | 4250bf29 | cluster_delete_7f37e191 | CLUSTER_DELETE | SUCCEEDED | 7f37e191 | | | 2016-09-22T11:06:32Z | - | 67cbcfb5 | node_delete_b825fb74 | NODE_DELETE | SUCCEEDED | b825fb74 | | | 2016-09-22T11:14:04Z | - | 6e661db8 | cluster_create_44762dab | CLUSTER_CREATE | SUCCEEDED | 44762dab | | | 2016-09-22T11:14:44Z | - | 7bfad7ed | node_delete_b716052d | NODE_DELETE | SUCCEEDED | b716052d | | | 2016-09-22T11:15:22Z | - | b299cf44 | cluster_delete_44762dab | CLUSTER_DELETE | SUCCEEDED | 44762dab | | | 2016-09-22T11:18:18Z | - | e973552e | node_create_b716052d | NODE_CREATE | SUCCEEDED | b716052d | | | 2016-09-22T11:25:58Z | - +----------+-------------------------+----------------+-----------+----------+------------+-------------+----------------------+ - -The :program:`openstack cluster` command line supports various options when listing the actions. - - -Sorting the List ----------------- - -You can specify the sorting keys and sorting direction when listing actions, using the option :option:`--sort`. The :option:`--sort` option accepts a string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used are action properties and the dirs can be one of ``asc`` and ``desc``. When omitted, Senlin sorts a given key using ``asc`` as the default direction. 
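- -Sort keys can be combined with the pagination options described below; for instance, a quick sketch that lists the five most recent actions first:: - - $ openstack cluster action list --sort created_at:desc --limit 5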
- -As a simpler example, the following command instructs the :program:`openstack cluster` command to sort actions using the ``name`` property in descending order:: - - $ openstack cluster action list --sort name:desc - -When sorting the list of actions, you can use one of ``name``, ``target``, ``action``, ``created_at`` and ``status``. - - -Filtering the List ------------------- - -You can filter the list of actions using the option :option:`--filters`. For example, the following command filters the action list by the ``action`` property:: - - $ openstack cluster action list --filters action=CLUSTER_SCALE_OUT - -The option :option:`--filters` accepts a list of key-value pairs separated by semicolon (``;``), where each pair is expected to be of format ``key=val``. The valid keys for filtering include ``name``, ``target``, ``action`` and ``status`` or any combination of them. - - -Paginating the Query Results ----------------------------- - -In case you have a huge collection of actions (which is highly likely the case), you can limit the number of actions returned using the option :option:`--limit <limit>`. For example:: - - $ openstack cluster action list --limit 1 - -Another option you can specify is the ID of an action after which you want the returned list to start. In other words, you don't want to see those actions with IDs that are or come before the one you specify. You can use the option :option:`--marker <ID>` for this purpose. For example:: - - $ openstack cluster action list --limit 1 \ - --marker 2959122e-11c7-4e82-b12f-f49dc5dac270 - -Only 1 action record is returned in this example and its UUID comes after the one specified from the command line. - - -Showing Details of an Action -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can use the :program:`openstack cluster` command to show the details about an action you are interested in. When specifying the identity of the action, you can use its name, its ID or its "short ID". Senlin API and engine will verify if the identifier you specified can uniquely identify an action. An error message will be returned if there is no action matching the identifier or if more than one action matches it. - -An example is shown below:: - - $ openstack cluster action show 8fac487f - +---------------+--------------------------------------+ - | Field | Value | - +---------------+--------------------------------------+ - | action | CLUSTER_DELETE | - | cause | RPC Request | - | created_at | 2016-09-23T09:00:25Z | - | depended_by | | - | depends_on | | - | domain_id | None | - | end_at | 1450683904.0 | - | id | 8fac487f-861a-449e-9678-478133bea8de | - | inputs | {} | - | interval | -1 | - | location | None | - | name | cluster_delete_7deb546f | - | outputs | {} | - | owner_id | None | - | project_id | bdeecc1b58004bb19302da77ac056b44 | - | start_at | 1450683904.0 | - | status | SUCCEEDED | - | status_reason | Action completed successfully. | - | target_id | 7deb546f-fd1f-499a-b120-94f8f07fadfb | - | timeout | 3600 | - | updated_at | None | - | user_id | f3cdb8010bb349d5bdff2815d8f007a1 | - +---------------+--------------------------------------+ - - -See Also -~~~~~~~~ - -* :doc:`Creating Receivers <receivers>` -* :doc:`Browsing Events <events>` diff --git a/doc/source/user/bindings.rst b/doc/source/user/bindings.rst deleted file mode 100644 index e6db0f412..000000000 --- a/doc/source/user/bindings.rst +++ /dev/null @@ -1,174 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -.. _ref-bindings: - -======================= -Cluster-Policy Bindings -======================= - -Concept -~~~~~~~ - -A :term:`Policy` object can be attached to more than one :term:`Cluster` at the same time. A cluster, in turn, can have more than one Policy object attached to it at any time. - -After a policy object is attached to a cluster, you can still enable or disable it or update some properties of the policy object. - - -Listing Policies Attached to a Cluster -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The :program:`openstack cluster` command provides a sub-command :command:`policy binding list` to list policy objects that are attached to a cluster. You can provide the name, the ID or the "short ID" of a cluster as the identifier to reference a cluster. For example, the command below lists the policies attached to the cluster ``webservers``:: - - $ openstack cluster policy binding list webservers - - -Sorting the List ----------------- - -You can specify the sorting keys and sorting direction when listing cluster policies, using the option :option:`--sort`. The :option:`--sort` option accepts a string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used are properties of the policy bound to a cluster and the dirs can be one of ``asc`` and ``desc``. When omitted, Senlin sorts a given key using ``asc`` as the default direction. - -For example, the following command line sorts the policy bindings using the ``enabled`` property in descending order:: - - $ openstack cluster policy binding list --sort enabled:desc c3 - -When sorting the list of policies, ``enabled`` is the only key you can specify for sorting. - - -Filtering the List ------------------- - -The :program:`openstack cluster` command also supports options for filtering the policy list at the server side. The option :option:`--filters` can be used for this purpose. For example, the following command filters the policy bindings by the ``enabled`` field:: - - $ openstack cluster policy binding list --filters enabled=True c3 - +-----------+-------------+---------------------------+------------+ - | policy_id | policy_name | policy_type | is_enabled | - +-----------+-------------+---------------------------+------------+ - | 0705f0f4 | up01 | senlin.policy.scaling-1.0 | True | - +-----------+-------------+---------------------------+------------+ - -The option :option:`--filters` accepts a list of key-value pairs separated by semicolon (``;``), where each key-value pair is expected to be of format ``<key>=<value>``. The only key that can be used for filtering as of today is ``enabled``. - - -Attaching a Policy to a Cluster -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Senlin permits policy objects to be attached to clusters and to be detached from clusters dynamically. When attaching a policy object to a cluster, you can customize the policy properties for the particular cluster. For example, you can specify whether the policy should be enabled once attached. 
- -The following options are supported for the command :command:`openstack cluster policy attach`: - -- :option:`--enabled`: a boolean indicating whether the policy is to be enabled once attached. - -For example, the following command attaches a policy named ``up01`` to the cluster ``c3``. When a policy is attached to a cluster, it is enabled by default. To keep it disabled, the user can use the parameter ``--enabled False``. For example:: - - $ openstack cluster policy attach --policy up01 --enabled False c3 - -Note that most of the time, Senlin doesn't allow more than one policy of the same type to be attached to the same cluster. This restriction is relaxed for some policy types. For example, when working with policies about scaling, you can actually attach more than one policy instance to the same cluster, each of which is about a specific scenario. - -For the identifiers specified for the cluster and the policy, you can use the name, the ID or the "short ID" of an object. The Senlin engine will try to make a guess in each case. If no entity matches the specified identifier or more than one entity matches it, you will get an error message. - - -Showing Policy Properties on a Cluster -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To examine the detailed properties of a policy object that has been attached to a cluster, you can use the :command:`openstack cluster policy binding show` command with the policy identifier and the cluster identifier specified. For example:: - - $ openstack cluster policy binding show --policy dp01 c3 - +--------------+--------------------------------------+ - | Field | Value | - +--------------+--------------------------------------+ - | cluster_name | c3 | - | data | None | - | id | 2b7e9294-b5cd-470f-b191-b18f7e672495 | - | is_enabled | True | - | location | None | - | name | None | - | policy_id | 239d7212-6196-4a89-9446-44d28717d7de | - | policy_name | dp01 | - | policy_type | senlin.policy.deletion-1.0 | - +--------------+--------------------------------------+ - -You can use the name, the ID or the "short ID" of a policy and/or a cluster to name the objects. - - -Updating Policy Properties on a Cluster -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once a policy is attached to a cluster, you can request that its property on this cluster be changed by using the command :command:`openstack cluster policy binding update`. Presently, you can only specify the ``enabled`` property to be updated. - -For example, the following command disables a policy on the specified cluster:: - - $ openstack cluster policy binding update \ - --enabled False --policy dp01 \ - mycluster - -The Senlin engine will perform validation of the arguments in the same way as that for the policy attach operation. You can use the name, the ID or the "short ID" of an entity to reference it, as you do with the policy attach operation as well. - - -Detaching a Policy from a Cluster -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Finally, to remove the binding between a specified policy object and a cluster, you can use the :command:`openstack cluster policy detach` command as shown below:: - - $ openstack cluster policy detach --policy dp01 mycluster - -This command will detach the specified policy from the specified cluster. You will use the option :option:`--policy` to specify the policy. 
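- -To confirm the result, you can list the policy bindings again; the policy just detached should no longer show up. A quick sketch, reusing the commands shown earlier:: - - $ openstack cluster policy binding list mycluster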
diff --git a/doc/source/user/clusters.rst b/doc/source/user/clusters.rst deleted file mode 100644 index f676b0638..000000000 --- a/doc/source/user/clusters.rst +++ /dev/null @@ -1,503 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -.. _ref-clusters: - -======== -Clusters -======== - -Concept -~~~~~~~ - -A :term:`Cluster` is a group of logical objects, each of which is called a :term:`Node` in Senlin's terminology. A cluster can contain zero or more nodes. A cluster has a ``profile_id`` property that specifies which default :term:`Profile` to use when new nodes are created/scaled as members of the cluster. It is valid for nodes in a cluster to reference different profile objects because Senlin only mandates that all nodes in a cluster have the same **profile type**. - -Senlin provides API and command line support for managing cluster membership. Please refer to :ref:`ref-membership` for details. Senlin also supports attaching :term:`Policy` objects to a cluster, customizing the policy properties when attaching a policy to a cluster. Please refer to :ref:`ref-bindings` for details. - -Listing Clusters -~~~~~~~~~~~~~~~~ - -The following command shows the clusters managed by the Senlin service:: - - $ openstack cluster list - +----------+------+--------+----------------------+------------+ - | id | name | status | created_at | updated_at | - +----------+------+--------+----------------------+------------+ - | 2959122e | c1 | ACTIVE | 2015-05-05T13:27:28Z | None | - | 092d0955 | c2 | ACTIVE | 2015-05-05T13:27:48Z | None | - +----------+------+--------+----------------------+------------+ - -Note that the first column in the output table is a *short ID* of a cluster object. The Senlin command line uses short IDs to save real estate on screen so that more useful information can be shown on a single line. To show the *full ID* in the list, you can add the :option:`--full-id` option to the command:: - - $ openstack cluster list --full-id - +--------------------+------+--------+----------------------+------------+ - | id | name | status | created_at | updated_at | - +--------------------+------+--------+----------------------+------------+ - | 2959122e-11c7-.... | c1 | ACTIVE | 2015-05-05T13:27:28Z | None | - | 092d0955-2645-.... | c2 | ACTIVE | 2015-05-05T13:27:48Z | None | - +--------------------+------+--------+----------------------+------------+ - - -Sorting the List ----------------- - -You can specify the sorting keys and sorting direction when listing clusters, using the option :option:`--sort`. The :option:`--sort` option accepts a string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used are cluster properties and the dirs can be one of ``asc`` and ``desc``. When omitted, Senlin sorts a given key using ``asc`` as the default direction. 
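- -Sort keys can also be combined; for instance, the following sketch sorts the clusters by ``status`` first and, within each status, by creation time with the newest first:: - - $ openstack cluster list --sort status:asc,created_at:desc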
- -As a simpler example, the following command sorts the clusters using just the ``name`` property in descending order:: - - $ openstack cluster list --sort name:desc - -When sorting the list of clusters, you can use one of ``name``, ``status``, ``init_at``, ``created_at`` and ``updated_at``. - - -Filtering the List ------------------- - -The :program:`openstack cluster list` command also provides options for filtering the cluster list at the server side. The option :option:`--filters` can be used for this purpose. For example, the following command filters the clusters by the ``status`` field:: - - $ openstack cluster list --filters status=ACTIVE - +----------+------+--------+----------------------+------------+ - | id | name | status | created_at | updated_at | - +----------+------+--------+----------------------+------------+ - | 2959122e | c1 | ACTIVE | 2015-05-05T13:27:28Z | None | - | 092d0955 | c2 | ACTIVE | 2015-05-05T13:27:48Z | None | - +----------+------+--------+----------------------+------------+ - -The option :option:`--filters` accepts a list of key-value pairs separated by semicolon (``;``), where each key-value pair is expected to be of format ``<key>=<value>``. The valid keys for filtering include: ``status``, ``name``, ``project`` and ``user``. - - -Paginating the Query Results ----------------------------- - -In case you have a huge collection of clusters, you can limit the number of clusters returned from the Senlin server each time, using the option :option:`--limit <limit>`. For example:: - - $ openstack cluster list --limit 1 - +----------+------+--------+----------------------+------------+ - | id | name | status | created_at | updated_at | - +----------+------+--------+----------------------+------------+ - | 2959122e | c1 | ACTIVE | 2015-05-05T13:27:28Z | None | - +----------+------+--------+----------------------+------------+ - -Another option you can specify is the ID of a cluster after which you want the returned list to start. In other words, you don't want to see those clusters with IDs that are or come before the one you specify. You can use the option :option:`--marker <ID>` for this purpose. For example:: - - $ openstack cluster list --limit 1 \ - --marker 2959122e-11c7-4e82-b12f-f49dc5dac270 - +----------+------+--------+----------------------+------------+ - | id | name | status | created_at | updated_at | - +----------+------+--------+----------------------+------------+ - | 092d0955 | c2 | ACTIVE | 2015-05-05T13:27:48Z | None | - +----------+------+--------+----------------------+------------+ - -Only 1 cluster record is returned in this example and its UUID comes after the one specified from the command line. - - -Creating a Cluster -~~~~~~~~~~~~~~~~~~ - -To create a cluster, you need to provide the ID or name of the profile to be associated with the cluster. 
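- -If you are not sure which profiles are available, you can look up their names and IDs first; a quick sketch:: - - $ openstack cluster profile list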
The following example creates a cluster using the profile named ``qstack``:: - - $ openstack cluster create --profile qstack c3 - +------------------+--------------------------------------+ - | Property | Value | - +------------------+--------------------------------------+ - | config | {} | - | created_at | None | - | data | {} | - | dependents | {} | - | desired_capacity | 0 | - | domain_id | None | - | id | 60424eb3-6adf-4fc3-b9a1-4a035bf171ac | - | init_at | 2015-05-05T13:35:47Z | - | location | None | - | max_size | -1 | - | metadata | {} | - | min_size | 0 | - | name | c3 | - | node_ids | | - | profile_id | bf38dc9f-d204-46c9-b515-79caf1e45c4d | - | profile_name | qstack | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | status | INIT | - | status_reason | Initializing | - | timeout | 3600 | - | updated_at | None | - | user_id | 0b82043b57014cd58add97a2ef79dac3 | - +------------------+--------------------------------------+ - -From the output you can see that a new cluster object was created and put into ``INIT`` status. Senlin will verify that the profile specified using the option :option:`--profile <profile>` does exist. The server allows the ``<profile>`` value to be a profile name, a profile ID or the short ID of a profile object. If the profile is not found or multiple profiles are found matching the value, you will receive an error message. - - -Controlling Cluster Capacity ----------------------------- - -When creating a cluster, by default :program:`senlin` will create a cluster with no nodes, i.e. the ``desired_capacity`` will be set to 0. However, you can specify the desired capacity of the cluster, the maximum size and/or the minimum size of the cluster. The default value for ``min_size`` is 0 and the default value for ``max_size`` is -1, meaning that there is no upper bound for the cluster size. - -The following command creates a cluster named "``test_cluster``", with its desired capacity set to 2, its minimum size set to 1 and its maximum size set to 3:: - - $ openstack cluster create --desired-capacity 2 \ - --min-size 1 --max-size 3 \ - --profile myprofile \ - test_cluster - -Senlin API and Senlin engine will validate the settings for these capacity arguments when receiving this request. An error message will be returned if the arguments fail to pass this validation, or else the cluster creation request will be queued as an action for execution. - -When ``desired_capacity`` is not specified and ``min_size`` is not specified, Senlin engine will create an empty cluster. When either ``desired_capacity`` or ``min_size`` is specified, Senlin will start the process of creating nodes immediately after the cluster object is created. - - -Other Properties ----------------- - -You can use the option :option:`--metadata` (or :option:`-M`) to associate some key-value pairs with the cluster to be created. These data are referred to as the "metadata" for the cluster. - -Since cluster operations may take some time to finish when being executed and Senlin interacts with the backend services to make it happen, there needs to be a way to tell whether an operation has timed out. When creating a cluster using the :program:`openstack cluster create` command line, you can use the option :option:`--timeout <timeout>` to specify the default timeout in number of seconds. This value would be the global setting for the cluster. - -You can use the option :option:`--config` to pass in key-value pairs to the cluster to be created. The following config properties are supported: - -- ``node.name.format``: Specify how cluster nodes are automatically named. 
-
-You can use the option :option:`--config` to pass key-value pairs to the
-cluster to be created. The following config properties are supported:
-
-- ``node.name.format``: Specifies how cluster nodes are automatically
-  named. The value can contain placeholders like ``$nI`` for the node
-  index padded with n zeros to the left, or ``$nR`` for a random string
-  of length n.
-
-- ``cluster.stop_node_before_delete``: If set to True, cluster operations
-  that result in a node deletion (e.g. scale-in, resize, etc.) will
-  request a node stop first. Once the node has been successfully shut
-  down, the node is deleted. The default setting is False, in which case
-  a cluster deletes a node without stopping it first.
-
-- ``cluster.stop_timeout_before_update``: Specifies the number of seconds
-  to wait when stopping a node as part of a cluster update operation. If
-  this value is not set, the value of ``default_nova_timeout`` in the
-  configuration will be used.
-
-
-Showing Details of a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When there are clusters in the Senlin database, you can request Senlin to
-show the details of a cluster you are interested in.
-
-You can use the name, the ID or the "short ID" of a cluster to specify the
-cluster to show. The Senlin API and engine will verify whether the
-identifier you specified can uniquely identify a cluster. An error message
-will be returned if there is no cluster matching the identifier or if more
-than one cluster matches it.
-
-An example is shown below::
-
-  $ openstack cluster show c3
-  +------------------+--------------------------------------+
-  | Field            | Value                                |
-  +------------------+--------------------------------------+
-  | config           | {}                                   |
-  | created_at       | 2015-07-07T03:30:53Z                 |
-  | data             | {}                                   |
-  | dependents       | {}                                   |
-  | desired_capacity | 2                                    |
-  | domain_id        | None                                 |
-  | id               | 2b7e9294-b5cd-470f-b191-b18f7e672495 |
-  | init_at          | 2015-05-07T03:30:52Z                 |
-  | location         | None                                 |
-  | max_size         | -1                                   |
-  | metadata         | {}                                   |
-  | min_size         | 0                                    |
-  | name             | c3                                   |
-  | node_ids         | b28692a5-2536-4921-985b-1142d6045e1f |
-  |                  | 4be10a88-e340-4518-a9e1-d742c53ac37f |
-  | profile_id       | bf38dc9f-d204-46c9-b515-79caf1e45c4d |
-  | profile_name     | qstack                               |
-  | project_id       | 333acb15a43242f4a609a27cb097a8f2     |
-  | status           | ACTIVE                               |
-  | status_reason    | Node stack2: Creation succeeded      |
-  | timeout          | 3600                                 |
-  | updated_at       | None                                 |
-  | user_id          | 0b82043b57014cd58add97a2ef79dac3     |
-  +------------------+--------------------------------------+
-
-From the result, you can examine the list of nodes (if any) that are
-members of this cluster.
-
-
-Updating a Cluster
-~~~~~~~~~~~~~~~~~~
-
-Once a cluster has been created, you can change its properties using the
-:command:`openstack cluster update` command. For example, to change the
-name of a cluster, you can use the following command::
-
-  $ openstack cluster update --name web_bak web_servers
-
-You can change the ``timeout`` property using the option
-:option:`--timeout`, and the metadata associated with the cluster using
-the option :option:`--metadata`.
-
-Using the :command:`openstack cluster update` command, you can also change
-the profile used by the cluster and its member nodes. The following
-example launches a global update on the cluster to switch it to a
-different profile::
-
-  $ openstack cluster update --profile fedora21_server web_cluster
-
-Suppose the cluster ``web_cluster`` is currently using a profile of type
-``os.nova.server`` where a Fedora 20 image is used; the command above will
-initiate a global upgrade to a new profile where a Fedora 21 image is
-used.
-
-The Senlin engine will verify whether the new profile has the same profile
-type as that of the existing one and whether the new profile has a
-well-formed ``spec`` property. If everything is fine, the engine will
-start a node-level profile update process. The node-level update operation
-is subject to policy checking/enforcement when there is an update policy
-attached to the cluster. Please refer to :ref:`ref-policies` and
-:ref:`ref-bindings` for more information.
-
-
-Resizing a Cluster
-~~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line supports several different
-sub-commands to resize a cluster.
-
-
-``openstack cluster resize``
-----------------------------
-
-The command :command:`openstack cluster resize` takes several arguments
-that allow you to resize a cluster in various ways:
-
-- you can change the size of a cluster to a specified number;
-- you can add a specified number of nodes to a cluster or remove a
-  specified number of nodes from a cluster;
-- you can instruct :program:`openstack cluster resize` to resize a
-  cluster by a specified percentage;
-- you can tune the ``min_size`` and/or ``max_size`` property of a cluster
-  when resizing it;
-- you can request a size change to be made on a best-effort basis: if the
-  resize operation cannot be fully realized due to some restrictions, this
-  argument tells the Senlin engine whether it is still expected to
-  partially realize the resize operation.
-
-You can specify one and only one of the following options for the
-:command:`openstack cluster resize` command:
-
-- use :option:`--capacity <capacity>` to specify the exact value of the
-  new cluster size;
-- use :option:`--adjustment <adjustment>` to specify the relative number
-  of nodes to add/remove;
-- use :option:`--percentage <percentage>` to specify the percentage of
-  cluster size change.
-
-The following command resizes the cluster ``test_cluster`` to 2 nodes,
-provided that the ``min_size`` is less than or equal to 2 and the
-``max_size`` is either no less than 2 or equal to -1 (indicating that
-there is no upper bound for the cluster size). This command makes use of
-the option :option:`--capacity <capacity>`, where ``<capacity>`` is the
-new size of the cluster::
-
-  $ openstack cluster resize --capacity 2 test_cluster
-
-Another way to resize a cluster is by specifying the option
-:option:`--adjustment <adjustment>`, where ``<adjustment>`` can be a
-positive or a negative integer giving the number of nodes to add or
-remove respectively. For example, the following command adds two nodes to
-the specified cluster::
-
-  $ openstack cluster resize --adjustment 2 test_cluster
-
-The following command removes two nodes from the specified cluster::
-
-  $ openstack cluster resize --adjustment -2 test_cluster
-
-Yet another way to resize a cluster is by specifying the size change as a
-percentage. You will use the option :option:`--percentage <percentage>`
-for this purpose. The ``<percentage>`` value can be either a positive or
-a negative float value giving the percentage of cluster size change. For
-example, the following command increases the cluster size by 30%::
-
-  $ openstack cluster resize --percentage 30 test_cluster
-
-The following command decreases the cluster size by 25%::
-
-  $ openstack cluster resize --percentage -25 test_cluster
-
-The Senlin engine computes the actual number of nodes to add or to remove
-based on the current size of the cluster, the specified percentage value,
-and the size constraints (i.e. the ``min_size`` and ``max_size``
-properties).
-
-When computing the new capacity for the cluster, the Senlin engine will
-determine the value based on the following rules:
-
-- If the computed size change is greater than 1.0 or less than -1.0, it
-  will be rounded to the integer part of the value. For example, 3.4 will
-  be rounded to 3 and -1.9 will be rounded to -1;
-- If the computed size change is between 0 and 1, Senlin will round it up
-  to 1;
-- If the computed size change is between 0 and -1, Senlin will round it
-  down to -1;
-- The new capacity should be in the range of ``min_size`` and
-  ``max_size``, inclusive, unless the option :option:`--strict` is
-  specified;
-- The range checking will be performed against the current size
-  constraints if no new value for ``min_size`` and/or ``max_size`` is
-  given, or else Senlin will first validate the new size constraints and
-  perform range checking against the new constraints;
-- If the option :option:`--min-step <number>` is specified, the
-  ``<number>`` value will be used if the absolute value of the computed
-  size change is less than ``<number>``.
-
-If the option :option:`--strict` is specified, Senlin will strictly
-conform to the cluster size constraints: if the capacity value falls out
-of the range, the request will be rejected. When :option:`--strict` is set
-to ``False``, the Senlin engine will do the resize on a best-effort basis.
-
-Suppose we have a cluster A with ``min_size`` set to 5 and a current size
-of 7. If the new capacity value is 4 and the option :option:`--strict` is
-specified, the request will be rejected with an error message. If the new
-capacity value is 4 and :option:`--strict` is not set, Senlin will try to
-resize the cluster to 5 nodes.
-
-Along with the :command:`openstack cluster resize` command, you can
-specify new size constraints using the option :option:`--min-size`, the
-option :option:`--max-size`, or both.
-
-
-``openstack cluster shrink`` and ``openstack cluster expand``
--------------------------------------------------------------
-
-The :command:`openstack cluster shrink` command and the
-:command:`openstack cluster expand` command are provided for convenience
-when you want to remove a specific number of nodes from a cluster or add
-a specific number of nodes to a cluster, respectively. These two commands
-both take an argument ``<count>``, which is a positive integer
-representing the number of nodes to add or remove. For example, the
-following command adds two nodes to the ``web_servers`` cluster::
-
-  $ openstack cluster expand --count 2 web_servers
-
-The following command removes two nodes from the ``web_servers`` cluster::
-
-  $ openstack cluster shrink --count 2 web_servers
-
-The option :option:`--count <count>` is optional. If this option is
-specified, Senlin will use it for the cluster size change, even when there
-are scaling policies attached to the cluster. If this option is omitted,
-however, Senlin will treat it as implicitly set to 1.
-
-
-Checking a Cluster
-~~~~~~~~~~~~~~~~~~
-
-A cluster can be checked using the :command:`openstack cluster check`
-command, for example::
-
-  $ openstack cluster check mycluster
-
-All nodes belonging to the specified cluster will perform the check
-operation. If a node's physical resource is not ACTIVE, the node status
-will be changed as part of the check operation.
-
-
-Recovering a Cluster
-~~~~~~~~~~~~~~~~~~~~
-
-A cluster can be recovered using the :command:`openstack cluster recover`
-command, for example::
-
-  $ openstack cluster recover mycluster --check true
-
-The option :option:`--check <boolean>` is optional. If this option is set,
-the cluster will perform a check operation on its nodes before doing the
-recovery. The recover operation will delete the affected nodes from the
-specified cluster and recreate them.
-
-
-Deleting a Cluster
-~~~~~~~~~~~~~~~~~~
-
-A cluster can be deleted using the :command:`openstack cluster delete`
-command, for example::
-
-  $ openstack cluster delete mycluster
-
-Note that in this command you can use the name, the ID or the "short ID"
-to specify the cluster object you want to delete. If the specified
-criteria cannot match any clusters, you will get a ``ResourceNotFound``
-exception. If more than one cluster matches the criteria, you will get a
-``MultipleChoices`` exception.
-
-When there are nodes in the cluster, the Senlin engine will launch a
-process to delete all nodes from the cluster and destroy them before
-deleting the cluster object itself.
-
-
-See Also
-~~~~~~~~
-
-There are other operations related to clusters. Please refer to the
-following links for operations related to cluster membership management
-and the creation and management of cluster-policy bindings:
-
-- :doc:`Managing Cluster Membership <membership>`
-- :doc:`Binding Policies to Clusters <bindings>`
-- :doc:`Examining Actions <actions>`
-- :doc:`Browsing Events <events>`
diff --git a/doc/source/user/events.rst b/doc/source/user/events.rst
deleted file mode 100644
index 7d9543c00..000000000
--- a/doc/source/user/events.rst
+++ /dev/null
@@ -1,226 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-events:
-
-======
-Events
-======
-
-An :term:`Event` is a record generated during engine execution. Such an
-event captures what has happened inside the senlin-engine. The
-senlin-engine service generates event records when it is performing some
-actions or checking policies.
-
-An event has a ``level`` property which can be interpreted as the severity
-level of the event:
-
-* 10: interpreted as ``DEBUG`` level. Events at this level can be safely
-  ignored by users. For developers, they may provide some useful
-  information for debugging the code.
-* 20: interpreted as ``INFO`` level. Events at this level are mostly about
-  notifying that some operations have been successfully performed.
-* 30: interpreted as ``WARNING`` level. Events at this level are used to
-  signal some unhealthy status or anomalies detected by the engine. These
-  events should be monitored and checked when operating a cluster.
-* 40: interpreted as ``ERROR`` level. Events at this level signify some
-  failures in engine operations. These events should be monitored and
-  checked when operating a cluster. Usually some user intervention is
-  expected to recover a cluster from this status.
-* 50: interpreted as ``CRITICAL`` level. Events at this level are about
-  serious problems encountered by the engine. The engine service may have
-  run into some bugs. User intervention is required to do a recovery.
-
-
-Event Dispatcher Configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Senlin provides an open architecture for event dispatching. Two of the
-built-in dispatchers are ``database`` and ``message``.
-
-1. The ``database`` dispatcher dumps the events into database tables and
-   is enabled by default.
-
-2. The ``message`` dispatcher converts the event objects into versioned
-   event notifications and publishes them on the global message queue.
-   This dispatcher is disabled by default. To enable it, you can add the
-   following line to the ``[DEFAULT]`` section of the ``senlin.conf`` file
-   and then restart the service engine::
-
-     [DEFAULT]
-     event_dispatchers = message
-
-   Depending on your deployment settings, you also have to add the
-   following lines to the ``senlin.conf`` file when using the ``message``
-   dispatcher. These lines set ``messaging`` as the default driver used by
-   the ``oslo.messaging`` package::
-
-     [oslo_messaging_notifications]
-     driver = messaging
-
-   With this configuration, the ``database`` dispatcher will be disabled,
-   which means you can only access the events via the message queue.
-
-3. The ``event_dispatchers`` field is a ``MultiString``; you can enable
-   both the ``database`` and ``message`` dispatchers if needed, using the
-   following configuration::
-
-     [DEFAULT]
-     event_dispatchers = database
-     event_dispatchers = message
-
-     [oslo_messaging_notifications]
-     driver = messaging
-
-Note that unprocessed event notifications, which are not associated with a
-TTL (time to live) value by default, will remain queued on the message
-bus. Please make sure the Senlin event notifications will be subscribed to
-and processed by some service before enabling the ``message`` dispatcher.
-
-By default, Senlin uses the ``senlin`` exchange, whose type is ``TOPIC``,
-to route the notifications to queues with different routing keys. The
-queue names can be ``versioned_notifications.debug``,
-``versioned_notifications.info``, ``versioned_notifications.warn`` and
-``versioned_notifications.error``, depending on the log level you are
-using in ``senlin.conf``. The corresponding ``routing_key`` values are the
-same as the queue names.
-
-There are two options to consume the notifications:
-
-- Consume the notifications from the default queues directly.
-- Declare your own queues, then bind them to the ``senlin`` exchange with
-  the corresponding ``routing_key`` to customize the flow.
-
-Since the event dispatchers are designed as plug-ins, you can develop your
-own event dispatchers and have the Senlin engine load them on startup. For
-more details on developing and plugging in your own event dispatchers,
-please refer to the :doc:`../contributor/plugin_guide` document.
-
-The following sections are about examining events when using the
-``database`` dispatcher, which creates database records when events
-happen.
-
-
-Listing Events
-~~~~~~~~~~~~~~
-
-The following command lists the events generated by the Senlin engine::
-
-  $ openstack cluster event list
-  +----------+---------------------+---------------+----------+--------------+-----------------------+-----------+-------+------------+
-  | id       | generated_at        | obj_type      | obj_id   | obj_name     | action                | status    | level | cluster_id |
-  +----------+---------------------+---------------+----------+--------------+-----------------------+-----------+-------+------------+
-  | 1f72eb5e | 2015-12-17T15:41:48 | NODE          | 427e64f3 | node-7171... | update                | ACTIVE    | 20    |            |
-  | 20b8eb9a | 2015-12-17T15:41:49 | NODE          | 6da22a49 | node-7171... | update                | ACTIVE    | 20    |            |
-  | 23721815 | 2015-12-17T15:42:51 | NODEACTION    | 5e9a9d3d | node_dele... | NODE_DELETE           | START     | 20    |            |
-  | 54f9eae4 | 2015-12-17T15:41:36 | CLUSTERACTION | 1bffa11d | cluster_c... | CLUSTER_CREATE        | SUCCEEDED | 20    | 9f1883a7   |
-  | 7e30df62 | 2015-12-17T15:42:51 | CLUSTERACTION | d3cef701 | cluster_d... | CLUSTER_DELETE        | START     | 20    | 9f1883a7   |
-  | bf51f23c | 2015-12-17T15:41:54 | CLUSTERACTION | d4dbbcea | cluster_s... | CLUSTER_SCALE_OUT     | START     | 20    | 9f1883a7   |
-  | c58063e9 | 2015-12-17T15:42:51 | NODEACTION    | b2292bb1 | node_dele... | NODE_DELETE           | START     | 20    |            |
-  | ca7d30c6 | 2015-12-17T15:41:38 | CLUSTERACTION | 0be70b0f | attach_po... | CLUSTER_ATTACH_POLICY | START     | 20    | 9f1883a7   |
-  | cfe5d0d7 | 2015-12-17T15:42:51 | CLUSTERACTION | 42cf5baa | cluster_d... | CLUSTER_DELETE        | START     | 20    | 9f1883a7   |
-  | fe2fc810 | 2015-12-17T15:41:49 | CLUSTERACTION | 0be70b0f | attach_po... | CLUSTER_ATTACH_POLICY | SUCCEEDED | 20    | 9f1883a7   |
-  +----------+---------------------+---------------+----------+--------------+-----------------------+-----------+-------+------------+
-
-The :program:`openstack cluster event list` command line supports various
-options when listing the events.
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing
-events, using the option :option:`--sort`. The :option:`--sort` option
-accepts a string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where
-the keys used are event properties and the dirs can be one of ``asc`` and
-``desc``. When omitted, Senlin sorts a given key using ``asc`` as the
-default direction.
-
-For example, the following command sorts the events using the
-``timestamp`` property in descending order::
-
-  $ openstack cluster event list --sort timestamp:desc
-
-When sorting the list of events, you can use one of ``timestamp``,
-``level``, ``otype``, ``oname``, ``user``, ``action`` and ``status``.
-
-
-Filtering the List
-------------------
-
-You can filter the list of events using the option :option:`--filters`.
-For example, the following command filters the event list by the ``otype``
-property::
-
-  $ openstack cluster event list --filters otype=NODE
-
-The option :option:`--filters` accepts a list of key-value pairs separated
-by semicolons (``;``), where each pair is expected to be of the format
-``key=val``. The valid keys for filtering include ``oname``, ``otype``,
-``oid``, ``cluster_id``, ``action``, ``level`` or any combination of them.
-
-
-Paginating the Query Results
-----------------------------
-
-In case you have a huge collection of events (which is highly likely the
-case), you can limit the number of events returned using the option
-:option:`--limit <limit>`. For example::
-
-  $ openstack cluster event list --limit 10
-
-Another option you can specify is the ID of an event after which you want
-the returned list to start. In other words, you don't want to see events
-whose IDs are, or come before, the one you specify. You can use the option
-:option:`--marker <marker>` for this purpose. For example::
-
-  $ openstack cluster event list --limit 20 \
-      --marker 2959122e-11c7-4e82-b12f-f49dc5dac270
-
-At most 20 event records will be returned in this example, and their UUIDs
-come after the one specified on the command line.
-
-
-Showing Details of an Event
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can use the :command:`openstack cluster event show` command to show
-the details of an event you are interested in. When specifying the
-identity of the event, you can use its name, its ID or its "short ID".
-The Senlin API and engine will verify whether the identifier you specified
-can uniquely identify an event. An error message will be returned if there
-is no event matching the identifier or if more than one event matches it.
-
-An example is shown below::
-
-  $ openstack cluster event show 19ba155a
-  +---------------+--------------------------------------+
-  | Field         | Value                                |
-  +---------------+--------------------------------------+
-  | action        | NODE_DELETE                          |
-  | cluster_id    | ce85d842-aa2a-4d83-965c-2cab5133aedc |
-  | generated_at  | 2015-12-17T15:43:26+00:00            |
-  | id            | 19ba155a-d327-490f-aa0f-589f67194b2c |
-  | level         | INFO                                 |
-  | location      | None                                 |
-  | name          | None                                 |
-  | obj_id        | cd9f519a-5589-4cbf-8a74-03b12fd9436c |
-  | obj_name      | node-ce85d842-003                    |
-  | obj_type      | NODE                                 |
-  | project_id    | 42d9e9663331431f97b75e25136307ff     |
-  | status        | end                                  |
-  | status_reason | Node deleted successfully.           |
-  | user_id       | 5e5bf8027826429c96af157f68dc9072     |
-  +---------------+--------------------------------------+
-
-
-See Also
-~~~~~~~~
-
-* :doc:`Operating Actions <actions>`
diff --git a/doc/source/user/membership.rst b/doc/source/user/membership.rst
deleted file mode 100644
index 138f8d676..000000000
--- a/doc/source/user/membership.rst
+++ /dev/null
@@ -1,165 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-membership:
-
-==================
-Cluster Membership
-==================
-
-Concept
-~~~~~~~
-
-A :term:`Node` can belong to at most one :term:`Cluster` at any time. A
-node is referred to as an *orphan node* when it doesn't belong to any
-cluster.
-
-A node can be made a member of a cluster at creation time, or you can
-change the cluster membership after the cluster and the node have been
-created.
-
-
-Listing Nodes in a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Using the command :command:`openstack cluster members list`, you can list
-the nodes that are members of a specific cluster. For example, to list
-nodes in cluster ``c3``, you can use the following command::
-
-  $ openstack cluster members list c3
-  +----------+--------+-------+--------+-------------+---------------------+
-  | id       | name   | index | status | physical_id | created_at          |
-  +----------+--------+-------+--------+-------------+---------------------+
-  | b28692a5 | stack1 | 1     | ACTIVE | fdf028a6    | 2015-07-07T05:23:40 |
-  | 4be10a88 | stack2 | 2     | ACTIVE | 7c87f545    | 2015-07-07T05:27:54 |
-  +----------+--------+-------+--------+-------------+---------------------+
-
-You can use the name, the ID or the "short ID" of a cluster as the
-argument for node listing. If the specified cluster identifier cannot
-match any cluster, or it matches more than one cluster, you will get an
-error message.
-
-From the list, you can see the ``index``, ``status`` and ``physical_id``
-of each node in this cluster. Note that the ``id`` field and the
-``physical_id`` field are shown as "short ID"s by default. If you want to
-see the full IDs, you can specify the option :option:`--full-id`::
-
-  $ openstack cluster members list --full-id c3
-  +------------...-+--------+-------+--------+-------------+-----------..-+
-  | id             | name   | index | status | physical_id | created_at   |
-  +------------...-+--------+-------+--------+-------------+-----------..-+
-  | b28692a5-25... | stack1 | 1     | ACTIVE | fdf0...     | 2015-07-07.. |
-  | 4be10a88-e3... | stack2 | 2     | ACTIVE | 7c87...     | 2015-07-07.. |
-  +------------...-+--------+-------+--------+-------------+-----------..-+
-
-If the cluster size is very large, you may want to list the nodes in
-pages. This can be achieved by using the :option:`--marker` option
-together with the :option:`--limit` option. The ``marker`` option value
-specifies a node ID after which you want the resulting list to start; the
-``limit`` option value specifies the number of nodes you want to include
-in the resulting list. For example, the following command lists the nodes
-starting after a specific node ID, with the length of the list set to 10::
-
-  $ openstack cluster members list --marker b28692a5 --limit 10 webservers
-
-Another useful option for listing nodes is the option
-:option:`--filters <filters>`. The option value accepts a string of the
-format "``K1=V1;K2=V2...``", where "``K1``" and "``K2``" are node
-properties to check, and "``V1``" and "``V2``" are the values used for
-filtering. The acceptable properties for filtering are ``name`` and
-``status``. For example, the following command lists cluster nodes based
-on whether a node's status is "``ACTIVE``"::
-
-  $ openstack cluster members list --filters status=ACTIVE webservers
-
-
-Specify the Cluster When Creating a Node
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are several ways to make a node a member of a cluster. When creating
-a node using the command :command:`openstack cluster node create`, you can
-specify the option :option:`--cluster` to tell Senlin which cluster the
-new node belongs to. Please refer to :ref:`ref-nodes` for detailed
-instructions.
-
-
-Adding Node(s) to a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When you already have some nodes and some clusters, you can add specified
-nodes to a specified cluster using the
-:command:`openstack cluster members add` command. For example, the
-following command adds two nodes to a cluster::
-
-  $ openstack cluster members add --nodes node3,node4 cluster1
-
-You can use the name, the ID or the "short ID" to name the node(s) to be
-added; you can also use the name, the ID or the "short ID" to specify the
-cluster. When the identifiers you specify cannot match any existing nodes
-or clusters respectively, you will receive an error message. If an
-identifier matches more than one object, you will get an error message as
-well.
-
-Before the Senlin engine performs the cluster membership changes, it will
-verify that the nodes to be added have the same :term:`Profile Type` as
-the target cluster. If the profile types don't match, you will get an
-error message.
-
-When nodes are added to a cluster, they will get new ``index`` property
-values that can be used to uniquely identify them within the cluster.
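-
-After an add operation like the one above, you can confirm the new members
-and their freshly assigned ``index`` values with a quick listing (a
-sketch, reusing the ``cluster1`` example)::
-
-  $ openstack cluster members list cluster1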
-
-
-Removing Node(s) from a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line also provides the command
-:command:`cluster members del` to remove node(s) from a cluster. In this
-case, you can use the name, the ID or the "short ID" to specify the
-node(s) and the cluster. The identifier specified must uniquely identify a
-node or a cluster object, or else you will get an error message indicating
-that the request was rejected. The following command removes two nodes
-from a cluster::
-
-  $ openstack cluster members del --nodes node21,node22 webservers
-
-When performing this operation, the Senlin engine will check if the
-specified nodes are actually members of the specified cluster. If any node
-from the specified node list does not belong to the target cluster, you
-will get an error message and the command fails.
-
-When nodes are removed from a cluster, they will get their ``index``
-property reset to -1.
-
-
-Replacing Node(s) in a Cluster
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line also provides the command
-:command:`cluster members replace` to replace node(s) in a cluster. The
-argument ``--nodes`` is used to describe a list of node pairs of the form
-``OLD_NODE=NEW_NODE``, where ``OLD_NODE`` is the name or ID of a node to
-be replaced, and ``NEW_NODE`` is the name or ID of the node serving as
-its replacement. You can use the name, the ID or the "short ID" to specify
-the cluster. The identifier specified must uniquely identify a node or a
-cluster object, or else you will get an error message indicating that the
-request was rejected. The following command replaces node21 with node22::
-
-  $ openstack cluster members replace --nodes node21=node22 webservers
-
-When performing this operation, the Senlin engine will check if the
-replaced nodes are actually members of the specified cluster. If any node
-from the specified node list does not belong to the target cluster, you
-will get an error message and the command fails.
-
-When nodes are removed from the cluster, they will get their ``index``
-property reset to -1.
-
-
-See Also
-~~~~~~~~
-
-Below are links to documents related to clusters and nodes:
-
-- :doc:`Creating Clusters <clusters>`
-- :doc:`Creating Nodes <nodes>`
diff --git a/doc/source/user/nodes.rst b/doc/source/user/nodes.rst
deleted file mode 100644
index f3b43d686..000000000
--- a/doc/source/user/nodes.rst
+++ /dev/null
@@ -1,487 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-nodes:
-
-=====
-Nodes
-=====
-
-Concept
-~~~~~~~
-
-A :term:`Node` is a logical object managed by the Senlin service. A node
-can be a member of at most one cluster at any time. A node that doesn't
-belong to any cluster is called an orphan node. Senlin provides APIs and
-command line support for managing a node's cluster membership. Please
-refer to :ref:`ref-membership` for details.
-
-A node has a ``profile_id`` property when created, which specifies the
-:term:`Profile` to use when creating a physical object that backs the
-node. Please refer to :ref:`ref-profiles` for the creation and management
-of profile objects.
-
-
-Listing Nodes
-~~~~~~~~~~~~~
-
-To list nodes that are managed by the Senlin service, you will use the
-command :command:`openstack cluster node list`. For example::
-
-  $ openstack cluster node list
-  +----------+--------+-------+--------+------------+-------------+...
-  | id       | name   | index | status | cluster_id | physical_id |
-  +----------+--------+-------+--------+------------+-------------+...
-  | e1b39a08 | node1  | -1    | ACTIVE |            | 89ce0d2b    |
-  | 57962220 | node-3 | -1    | ACTIVE |            | 3386e306    |
-  | b28692a5 | stack1 | 1     | ACTIVE | 2b7e9294   | fdf028a6    |
-  | 4be10a88 | stack2 | 2     | ACTIVE | 2b7e9294   | 7c87f545    |
-  +----------+--------+-------+--------+------------+-------------+...
-
-Note that some columns in the output table are the *short ID* of objects.
-The Senlin command line uses short IDs to save real estate on screen so
-that more useful information can be shown on a single line. To show the
-*full ID* in the list, you can add the option :option:`--full-id` to the
-command.
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing nodes,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys
-used are node properties and the dirs can be one of ``asc`` and ``desc``.
-When omitted, Senlin sorts a given key using ``asc`` as the default
-direction.
-
-For example, the following command sorts the nodes using the ``status``
-property in descending order::
-
-  $ openstack cluster node list --sort status:desc
-
-When sorting the list of nodes, you can use one of ``index``, ``name``,
-``status``, ``init_at``, ``created_at`` and ``updated_at``.
-
-
-Filtering the List
-------------------
-
-You can specify the option :option:`--cluster <cluster>` to list nodes
-that are members of a specific cluster. For example::
-
-  $ openstack cluster node list --cluster c3
-  +----------+---------+-------+--------+------------+-------------+...
-  | id       | name    | index | status | cluster_id | physical_id |
-  +----------+---------+-------+--------+------------+-------------+...
-  | b28692a5 | stack1  | 1     | ACTIVE | 2b7e9294   | fdf028a6    |
-  | 4be10a88 | stack2  | 2     | ACTIVE | 2b7e9294   | 7c87f545    |
-  +----------+---------+-------+--------+------------+-------------+...
-
-Besides these options, you can add the option :option:`--filters
-<filters>` to the command :command:`openstack cluster node list` to
-specify keys (node property names) and values to filter the list by. The
-valid keys for filtering are ``name`` and ``status``. For example, the
-command below filters the list by node status ``ACTIVE``::
-
-  $ openstack cluster node list --filters status=ACTIVE
-
-
-Paginating the List
--------------------
-
-In case you have a large number of nodes, you can limit the number of
-nodes returned from the Senlin server each time, using the option
-:option:`--limit <limit>`. For example::
-
-  $ openstack cluster node list --limit 1
-
-Another option you can specify is the ID of a node after which you want
-the returned list to start. In other words, you don't want to see nodes
-whose IDs are, or come before, the one you specify. You can use the option
-:option:`--marker <marker>` for this purpose. For example::
-
-  $ openstack cluster node list --marker 765385ed-f480-453a-8601-6fb256f512fc
-
-With the option :option:`--marker` and the option :option:`--limit`, you
-will be able to control how many node records you get from each request.
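-
-These listing options compose as well. A minimal sketch (reusing the
-cluster ``c3`` above) that asks for the first ten ``ACTIVE`` members of
-the cluster, sorted by name::
-
-  $ openstack cluster node list --cluster c3 --filters status=ACTIVE \
-      --sort name --limit 10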
-
-
-Creating a Node
-~~~~~~~~~~~~~~~
-
-To create a node, you need to specify the ID or name of the profile to be
-used. For example, the following command creates a node named
-``test_node`` using a profile named ``pstack``::
-
-  $ openstack cluster node create --profile pstack test_node
-  +---------------+--------------------------------------+
-  | Property      | Value                                |
-  +---------------+--------------------------------------+
-  | cluster_id    |                                      |
-  | created_at    | None                                 |
-  | data          | {}                                   |
-  | dependents    | {}                                   |
-  | details       | None                                 |
-  | domain_id     | None                                 |
-  | id            | 1984b5a0-9dd7-4dda-b1e6-e8c1f640598f |
-  | index         | -1                                   |
-  | init_at       | 2015-07-09T11:41:18                  |
-  | location      | None                                 |
-  | metadata      | {}                                   |
-  | name          | test_node                            |
-  | physical_id   | None                                 |
-  | profile_id    | 9b127538-a675-4271-ab9b-f24f54cfe173 |
-  | profile_name  | pstack                               |
-  | project_id    | 333acb15a43242f4a609a27cb097a8f2     |
-  | role          |                                      |
-  | status        | INIT                                 |
-  | status_reason | Initializing                         |
-  | updated_at    | None                                 |
-  | user_id       | 5e5bf8027826429c96af157f68dc9072     |
-  +---------------+--------------------------------------+
-
-When processing this request, the Senlin engine will verify whether the
-profile value specified is a profile name, a profile ID or the short ID of
-a profile object. If the profile is not found, or more than one profile
-matches the value, you will receive an error message.
-
-Note that the ``index`` property of the new node is -1. This is because we
-didn't specify the owning cluster for the node. To join a node to an
-existing cluster, you can either use the
-:command:`openstack cluster members add` command (:ref:`ref-membership`)
-after the node is created, or specify the owning cluster upon node
-creation, as shown by the following example::
-
-  $ openstack cluster node create --profile pstack --cluster c1 test_node
-
-The command above creates a new node using profile ``pstack`` and makes it
-a member of the cluster ``c1``, specified using the option
-:option:`--cluster`. When a node becomes a member of a cluster, it will
-get a value for its ``index`` property that uniquely identifies it within
-the owning cluster.
-
-When the owning cluster is specified, the Senlin engine will verify
-whether the specified cluster references a profile that has the same
-:term:`Profile Type` as that of the new node. If the profile types don't
-match, you will receive an error message from the
-:command:`openstack cluster` command.
-
-Another argument that can be useful when creating a new node is the
-option :option:`--role <role>`. The value can be used by a profile type
-implementation to treat nodes differently. For example, the following
-command creates a node with a ``master`` role::
-
-  $ openstack cluster node create --profile pstack --cluster c1 \
-      --role master master_node
-
-A profile type implementation may check this role value when operating the
-physical object that backs the node. It is okay for a profile type
-implementation to ignore this value.
-
-The last argument you can specify when creating a new node is the option
-:option:`--metadata <metadata>`. The value for this option is a list of
-key-value pairs separated by semicolons ('``;``'). These key-value pairs
-are attached to the node and can be used for whatever purposes. For
-example::
-
-  $ openstack cluster node create --profile pstack \
-      --metadata owner=JohnWhite test_node
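-
-Putting these options together, a single sketch (with the hypothetical
-names ``worker_node`` and ``tier=web`` chosen only for illustration) can
-create a node directly into a cluster, with a role and metadata attached::
-
-  $ openstack cluster node create --profile pstack --cluster c1 \
-      --role worker --metadata "owner=JohnWhite;tier=web" worker_node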
-
-
-Showing Details of a Node
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can use the name, the ID or the "short ID" of a node to specify the
-node to show. The Senlin API and engine will verify whether the identifier
-you specified can uniquely identify a node. An error message will be
-returned if there is no node matching the identifier or if more than one
-node matches it.
-
-An example is shown below::
-
-  $ openstack cluster node show test_node
-  +---------------+--------------------------------------+
-  | Field         | Value                                |
-  +---------------+--------------------------------------+
-  | cluster_id    | None                                 |
-  | created_at    | 2015-07-09T11:41:20                  |
-  | data          | {}                                   |
-  | dependents    | {}                                   |
-  | details       | {}                                   |
-  | domain_id     | None                                 |
-  | id            | 1984b5a0-9dd7-4dda-b1e6-e8c1f640598f |
-  | index         | -1                                   |
-  | init_at       | 2015-07-09T11:41:18                  |
-  | location      | None                                 |
-  | metadata      | {}                                   |
-  | name          | test_node                            |
-  | physical_id   | 0e444642-b280-4c88-8be4-76ad0d158dac |
-  | profile_id    | 9b127538-a675-4271-ab9b-f24f54cfe173 |
-  | profile_name  | pstack                               |
-  | project_id    | 333acb15a43242f4a609a27cb097a8f2     |
-  | role          | None                                 |
-  | status        | ACTIVE                               |
-  | status_reason | Creation succeeded                   |
-  | updated_at    | None                                 |
-  | user_id       | 5e5bf8027826429c96af157f68dc9072     |
-  +---------------+--------------------------------------+
-
-From the output, you can see the ``physical_id`` of a node (if it has been
-successfully created). For different profile types, this value may be the
-ID of an object of a certain type. For example, if the profile type used
-is "``os.heat.stack``", this is the Heat stack ID; if the profile type
-used is "``os.nova.server``", it gives the Nova server ID.
-
-A useful argument for the command :command:`openstack cluster node show`
-is the option :option:`--details`. When specified, you will get the
-details about the physical object that backs the node. For example::
-
-  $ openstack cluster node show --details test_node
-
-
-Checking a Node
-~~~~~~~~~~~~~~~
-
-Once a node has been created, you can use its name, ID or "short ID" to
-specify the node to check. The senlin-engine performs a profile-specific
-check operation to get the latest status of the physical resource (for
-example, a virtual machine). If the virtual machine status is not ACTIVE,
-the node will be set to ERROR status. For example::
-
-  $ openstack cluster node check node-biQA3BOM
-
-
-Recovering a Node
-~~~~~~~~~~~~~~~~~
-
-After a node has been created and running for a period of time, if the
-node goes into ERROR status, you can try to restore it to ACTIVE status
-using the :command:`openstack cluster node recover` command. The recover
-operation will delete the specified node and recreate it. For example::
-
-  $ openstack cluster node recover node-biQA3BOM
-
-
-Updating a Node
-~~~~~~~~~~~~~~~
-
-Once a node has been created, you can change its properties using the
-command :command:`openstack cluster node update`. For example, to change
-the name of a node, you can use the option :option:`--name`, as shown by
-the following command::
-
-  $ openstack cluster node update --name new_node_name old_node_name
-
-Similarly, you can modify the ``role`` property of a node using the option
-:option:`--role`. For example::
-
-  $ openstack cluster node update --role slave master_node
-
-You can change the metadata associated with a node using the option
-:option:`--metadata`::
-
-  $ openstack cluster node update --metadata version=2.1 my_node
-
-Using the :command:`openstack cluster node update` command, you can also
-change the profile used by a node.
-The following example updates a node to switch it to a different profile::
-
-  $ openstack cluster node update --profile fedora21_server fedora20_server
-
-Suppose the node ``fedora20_server`` is currently using a profile of type
-``os.nova.server`` where a Fedora 20 image is used; the command above will
-initiate an upgrade to use a new profile with a Fedora 21 image.
-
-The Senlin engine will verify whether the new profile has the same profile
-type as that of the existing one and whether the new profile has a
-well-formed ``spec`` property. If everything is fine, the engine will
-start the profile update process.
-
-
-Adopting a Node
-~~~~~~~~~~~~~~~
-
-In the Senlin service, we can adopt an existing resource as a node and
-have a profile created for this node. To adopt a node, you need to specify
-the resource's physical ID using the option :option:`--identity
-<identity>` and the resource's profile type name using the option
-:option:`--type <type>`. For example, the following command adopts a
-server with ID ``1177c8e8-8472-4e9d-8f15-1d4866b85b8b`` as a node named
-``test_adopt_node``::
-
-  $ openstack cluster node adopt --identity \
-      1177c8e8-8472-4e9d-8f15-1d4866b85b8b --type os.nova.server-1.0 \
-      --name test_adopt_node
-  +---------------+--------------------------------------+
-  | Field         | Value                                |
-  +---------------+--------------------------------------+
-  | cluster_id    |                                      |
-  | created_at    | 2017-08-16T07:52:50Z                 |
-  | data          | {}                                   |
-  | dependents    | {}                                   |
-  | details       | None                                 |
-  | domain_id     | None                                 |
-  | id            | f88b1d7d-1e25-4362-987c-52f8aea26520 |
-  | index         | -1                                   |
-  | init_at       | 2017-08-16T07:52:50Z                 |
-  | location      | None                                 |
-  | metadata      | {}                                   |
-  | name          | test_adopt_node                      |
-  | physical_id   | 1177c8e8-8472-4e9d-8f15-1d4866b85b8b |
-  | profile_id    | f9e5e3dd-d4f3-44a1-901e-351fa39e5801 |
-  | profile_name  | prof-test_adopt_node                 |
-  | project_id    | 138cf3f92bb3459da02363db8d53ac30     |
-  | role          |                                      |
-  | status        | ACTIVE                               |
-  | status_reason | Node adopted successfully            |
-  | updated_at    | None                                 |
-  | user_id       | 67dc524bfb45492496c8ff7ecdedd394     |
-  +---------------+--------------------------------------+
-
-The option :option:`--name <name>` is optional; if omitted, the Senlin
-engine will generate a random name starting with ``node-`` for the node.
-
-The option :option:`--role <role>` can be used by a profile type
-implementation to treat nodes differently. For example, the following
-command adopts a server as a node with a ``master`` role::
-
-  $ openstack cluster node adopt --identity \
-      1177c8e8-8472-4e9d-8f15-1d4866b85b8b --type os.nova.server-1.0 \
-      --name test_adopt_node --role master
-
-The option :option:`--metadata <metadata>` takes a list of key-value pairs
-separated by semicolons ('``;``'). These key-value pairs are attached to
-the node and can be used for whatever purposes. For example::
-
-  $ openstack cluster node adopt --identity \
-      1177c8e8-8472-4e9d-8f15-1d4866b85b8b --type os.nova.server-1.0 \
-      --name test_adopt_node --metadata "key1=value1;key2=value2"
-
-Another option, :option:`--overrides <json>`, allows users to override
-properties in the node's profile. For example, the following command
-adopts a server as a node and overrides the network properties in the
-node's profile::
-
-  $ openstack cluster node adopt --identity \
-      1177c8e8-8472-4e9d-8f15-1d4866b85b8b \
-      --type os.nova.server-1.0 \
-      --overrides '{"networks": [{"network": "public"}]}'
-
-The option :option:`--snapshot <boolean>` is of boolean type. If set, the
-Senlin engine will create a snapshot of the resource before accepting it
-as a node.
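-
-After adopting, it may be worth confirming that the generated profile and
-the backing resource line up as expected; a quick hedge (reusing the
-``test_adopt_node`` example above)::
-
-  $ openstack cluster node show --details test_adopt_node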
-
-
-Previewing a Node for Adoption
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A resource can be previewed before getting adopted as a Senlin node, using
-the :command:`openstack cluster node adopt` command with the option
-:option:`--preview`. To preview a node, you need to specify the resource's
-physical ID using the option :option:`--identity <identity>` and the
-resource's profile type name using the option :option:`--type <type>`.
-For example::
-
-  $ openstack cluster node adopt --preview \
-      --identity 1177c8e8-8472-4e9d-8f15-1d4866b85b8b \
-      --type os.nova.server-1.0
-  +--------------+----------------------------------------------------------------------+
-  | Field        | Value                                                                |
-  +--------------+----------------------------------------------------------------------+
-  | node_preview | +------------+-----------------------------------------------------+ |
-  |              | | property   | value                                               | |
-  |              | +------------+-----------------------------------------------------+ |
-  |              | | properties | {                                                   | |
-  |              | |            |   "name": "test0",                                  | |
-  |              | |            |   "availability_zone": "nova",                      | |
-  |              | |            |   "block_device_mapping_v2": [],                    | |
-  |              | |            |   "image": "6232a7b9-8af1-4dce-8eb5-f2988a0e34bc",  | |
-  |              | |            |   "key_name": "oskey",                              | |
-  |              | |            |   "auto_disk_config": false,                        | |
-  |              | |            |   "flavor": "1",                                    | |
-  |              | |            |   "metadata": {},                                   | |
-  |              | |            |   "networks": [                                     | |
-  |              | |            |     {                                               | |
-  |              | |            |       "network": "private"                          | |
-  |              | |            |     }                                               | |
-  |              | |            |   ],                                                | |
-  |              | |            |   "security_groups": [                              | |
-  |              | |            |     "default",                                      | |
-  |              | |            |     "default"                                       | |
-  |              | |            |   ],                                                | |
-  |              | |            |   "config_drive": false                             | |
-  |              | |            | }                                                   | |
-  |              | | type       | os.nova.server                                      | |
-  |              | | version    | 1.0                                                 | |
-  |              | +------------+-----------------------------------------------------+ |
-  +--------------+----------------------------------------------------------------------+
-
-The option :option:`--overrides <json>` allows users to override
-properties in the node's profile. For example, the following command
-previews a server adoption and overrides the network properties in the
-node's profile::
-
-  $ openstack cluster node adopt --preview --identity \
-      1177c8e8-8472-4e9d-8f15-1d4866b85b8b \
-      --type os.nova.server-1.0 \
-      --overrides '{"networks": [{"network": "public"}]}'
-  +--------------+----------------------------------------------------------------------+
-  | Field        | Value                                                                |
-  +--------------+----------------------------------------------------------------------+
-  | node_preview | +------------+-----------------------------------------------------+ |
-  |              | | property   | value                                               | |
-  |              | +------------+-----------------------------------------------------+ |
-  |              | | properties | {                                                   | |
-  |              | |            |   "name": "test0",                                  | |
-  |              | |            |   "availability_zone": "nova",                      | |
-  |              | |            |   "block_device_mapping_v2": [],                    | |
-  |              | |            |   "image": "6232a7b9-8af1-4dce-8eb5-f2988a0e34bc",  | |
-  |              | |            |   "key_name": "oskey",                              | |
-  |              | |            |   "auto_disk_config": false,                        | |
-  |              | |            |   "flavor": "1",                                    | |
-  |              | |            |   "metadata": {},                                   | |
-  |              | |            |   "networks": [                                     | |
-  |              | |            |     {                                               | |
-  |              | |            |       "network": "public"                           | |
-  |              | |            |     }                                               | |
-  |              | |            |   ],                                                | |
-  |              | |            |   "security_groups": [                              | |
-  |              | |            |     "default",                                      | |
-  |              | |            |     "default"                                       | |
-  |              | |            |   ],                                                | |
-  |              | |            |   "config_drive": false                             | |
-  |              | |            | }                                                   | |
-  |              | | type       | os.nova.server                                      | |
-  |              | | version    | 1.0                                                 | |
-  |              | +------------+-----------------------------------------------------+ |
-  +--------------+----------------------------------------------------------------------+
-
-The option :option:`--snapshot <boolean>` is of boolean type. If set, the
-Senlin engine will create a snapshot of the resource before accepting it
-as a node.
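-
-A cautious adoption workflow, then, is to preview first, inspect the
-``node_preview`` output (overriding properties as needed), and only then
-rerun the same command without :option:`--preview` to perform the actual
-adoption::
-
-  $ openstack cluster node adopt --preview \
-      --identity 1177c8e8-8472-4e9d-8f15-1d4866b85b8b \
-      --type os.nova.server-1.0
-  $ openstack cluster node adopt \
-      --identity 1177c8e8-8472-4e9d-8f15-1d4866b85b8b \
-      --type os.nova.server-1.0 --name test_adopt_node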
-
-
-Deleting a Node
-~~~~~~~~~~~~~~~
-
-A node can be deleted using the :command:`openstack cluster node delete`
-command, for example::
-
-  $ openstack cluster node delete my_node
-
-Note that in this command you can use the name, the ID or the "short ID"
-to specify the node you want to delete. If the specified criteria cannot
-match any nodes, you will get a ``ResourceNotFound`` exception. If more
-than one node matches the criteria, you will get a ``MultipleChoices``
-exception.
-
-
-See Also
-~~~~~~~~
-
-Below are links to documents related to node management:
-
-- :doc:`Managing Profile Objects <profiles>`
-- :doc:`Creating Clusters <clusters>`
-- :doc:`Managing Cluster Membership <membership>`
-- :doc:`Examining Actions <actions>`
-- :doc:`Browsing Events <events>`
diff --git a/doc/source/user/policies.rst b/doc/source/user/policies.rst
deleted file mode 100644
index d4217fad9..000000000
--- a/doc/source/user/policies.rst
+++ /dev/null
@@ -1,246 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-
-.. _ref-policies:
-
-========
-Policies
-========
-
-Concept
-~~~~~~~
-
-A :term:`Policy` is an object instantiated from a :term:`Policy Type`.
-Once created, it can be dynamically attached to or detached from a
-cluster. Such a policy usually contains rules to be checked/enforced when
-a certain :term:`Action` is about to be executed or has been executed.
-
-One policy can be attached to many clusters, and one cluster can have many
-policies attached. In addition to this, a policy attached to a cluster can
-be dynamically enabled or disabled. Please refer to :ref:`ref-bindings`
-for details.
-
-
-Listing Policies
-~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line provides a sub-command
-:command:`openstack cluster policy list` that can be used to enumerate
-policy objects known to the service. For example::
-
-  $ openstack cluster policy list
-  +----------+------+-----------------------------+---------------------+
-  | id       | name | type                        | created_at          |
-  +----------+------+-----------------------------+---------------------+
-  | 239d7212 | dp01 | senlin.policy.deletion-1.0  | 2015-07-11T04:24:34 |
-  | 7ecfd026 | lb01 | senlin.policy.placement-1.0 | 2015-07-11T04:25:28 |
-  +----------+------+-----------------------------+---------------------+
-
-Note that the first column in the output table is the *short ID* of a
-policy object. The Senlin command line uses short IDs to save real estate
-on screen so that more useful information can be shown on a single line.
-To show the *full ID* in the list, you can add the :option:`--full-id`
-option to the command.
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing
-policies, using the option :option:`--sort`. The :option:`--sort` option
-accepts a string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where
-the keys used are policy properties and the dirs can be one of ``asc``
-and ``desc``. When omitted, Senlin sorts a given key using ``asc`` as the
-default direction.
-
-For example, the following command sorts the policies using the ``name``
-property in descending order::
-
-  $ openstack cluster policy list --sort name:desc
-
-When sorting the list of policies, you can use one of ``type``, ``name``,
-``created_at`` and ``updated_at``.
-
-
-Paginating the List
--------------------
-
-In case you have a huge collection of policy objects, you can limit the
-number of policies returned from the Senlin server, using the option
-:option:`--limit`. For example::
-
-  $ openstack cluster policy list --limit 1
-  +----------+------+----------------------------+---------------------+
-  | id       | name | type                       | created_at          |
-  +----------+------+----------------------------+---------------------+
-  | 239d7212 | dp01 | senlin.policy.deletion-1.0 | 2015-07-11T04:24:34 |
-  +----------+------+----------------------------+---------------------+
-
-Yet another option you can specify is the ID of a policy object after
-which you want the returned list to start. In other words, you don't want
-to see policies whose IDs are, or come before, the one you specify. You
-can use the option :option:`--marker <marker>` for this purpose. For
-example::
-
-  $ openstack cluster policy list --limit 1 \
-      --marker 239d7212-6196-4a89-9446-44d28717d7de
-
-Combining the :option:`--marker` option and the :option:`--limit` option
-enables you to do pagination on the results returned from the server.
-
-
-Creating a Policy
-~~~~~~~~~~~~~~~~~
-
-When creating a new policy object, you need a "spec" file in YAML format.
-You may want to check the :command:`openstack cluster policy type show`
-command in :ref:`ref-policy-types` for the property names and types for a
-specific :term:`Policy Type`. For example, the following is a spec for
-the policy type ``senlin.policy.deletion`` (the source can be found in
-the :file:`examples/policies/deletion_policy.yaml` file)::
-
-  # Sample deletion policy that can be attached to a cluster.
-  type: senlin.policy.deletion
-  version: 1.0
-  properties:
-    # The valid values include:
-    # OLDEST_FIRST, OLDEST_PROFILE_FIRST, YOUNGEST_FIRST, RANDOM
-    criteria: OLDEST_FIRST
-
-    # Whether deleted node should be destroyed
-    destroy_after_deletion: True
-
-    # Length in number of seconds before the actual deletion happens
-    # This param buys an instance some time before deletion
-    grace_period: 60
-
-    # Whether the deletion will reduce the desired capacity of
-    # the cluster as well
-    reduce_desired_capacity: False
-
-The properties in this spec file are specific to the
-``senlin.policy.deletion`` policy type.
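-
-Before writing a spec of your own, it can help to double-check the
-property names against the schema published by the server (see
-:ref:`ref-policy-types`), for example::
-
-  $ openstack cluster policy type show senlin.policy.deletion-1.0
-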
To create a policy object using this "spec" file, you can use the -following command:: - - $ cd /opt/stack/senlin/examples/policies - $ openstack cluster policy create --spec deletion_policy.yaml dp01 - +------------+-----------------------------------------------------------+ - | Field | Value | - +------------+-----------------------------------------------------------+ - | created_at | None | - | data | {} | - | domain_id | None | - | id | c2e3cd74-bb69-4286-bf06-05d802c8ec12 | - | location | None | - | project_id | 42d9e9663331431f97b75e25136307ff | - | name | dp01 | - | spec | { | - | | "version": 1.0, | - | | "type": "senlin.policy.deletion", | - | | "description": "A policy for choosing victim node(s).", | - | | "properties": { | - | | "destroy_after_deletion": true, | - | | "grace_period": 60, | - | | "reduce_desired_capacity": false, | - | | "criteria": "OLDEST_FIRST" | - | | } | - | | } | - | type | None | - | updated_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+-----------------------------------------------------------+ - - -Showing the Details of a Policy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can use the :command:`openstack cluster policy show` command to show the -properties of a policy. You need to provide an identifier to the command -line to indicate the policy object you want to examine. The identifier can be -the ID, the name or the "short ID" of a policy object. For example:: - - $ openstack cluster policy show dp01 - +------------+------------------------------------------------------------+ - | Field | Value | - +------------+------------------------------------------------------------+ - | created_at | 2015-07-11T04:24:34 | - | data | {} | - | domain_id | None | - | id | c2e3cd74-bb69-4286-bf06-05d802c8ec12 | - | location | None | - | name | dp01 | - | project_id | 42d9e9663331431f97b75e25136307ff | - | spec | { | - | | "version": 1.0, | - | | "type": "senlin.policy.deletion", | - | | "description": "A policy for choosing victim node(s).", | - | | "properties": { | - | | "destroy_after_deletion": true, | - | | "grace_period": 60, | - | | "reduce_desired_capacity": false, | - | | "criteria": "OLDEST_FIRST" | - | | } | - | | } | - | type | None | - | updated_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+------------------------------------------------------------+ - -When there is no policy object matching the identifier, you will get an error -message. When there is more than one object matching the identifier, you will -get an error message as well. - - -Updating a Policy -~~~~~~~~~~~~~~~~~ - -After a policy object is created, you may want to change some properties of -it. You can use the :command:`openstack cluster policy update` to change the -"``name``" of a policy. For example, the following command renames a policy -object from "``dp01``" to "``dp01_bak``":: - - $ openstack cluster policy update --name dp01_bak dp01 - -If the named policy object could not be found or the parameter value fails the -validation, you will get an error message. - - -Deleting a Policy -~~~~~~~~~~~~~~~~~ - -When there are no clusters referencing a policy object, you can delete it from -the Senlin database using the following command:: - - $ openstack cluster policy delete dp01 - -Note that in this command you can use the name, the ID or the "short ID" to -specify the policy object you want to delete. If the specified criteria -cannot match any policy objects, you will get a ``ResourceNotFound`` exception. 
-If more than one policy matches the criteria, you will get a ``MultipleChoices`` -exception. - -See Also -~~~~~~~~ - -The list below provides links to documents related to the creation and usage -of policy objects. - -* :doc:`Working with Policy Types ` -* :ref:`Affinity Policy ` -* :ref:`Batch Policy ` -* :ref:`Deletion Policy ` -* :ref:`Health Policy ` -* :ref:`Load-Balancing Policy ` -* :ref:`Region Placement Policy ` -* :ref:`Scaling Policy ` -* :ref:`Zone Placement Policy ` -* :doc:`Managing the Bindings between Clusters and Policies ` -* :doc:`Browsing Events ` diff --git a/doc/source/user/policy_types.rst b/doc/source/user/policy_types.rst deleted file mode 100644 index 83c6f1f89..000000000 --- a/doc/source/user/policy_types.rst +++ /dev/null @@ -1,208 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -.. _ref-policy-types: - -============ -Policy Types -============ - -Concept -~~~~~~~ - -A :term:`Policy Type` is an abstract specification of the rules to be checked -and/or enforced when an :term:`Action` is performed on a cluster that -contains nodes of certain :term:`Profile Type`. - -A registry of policy types is built in memory when the Senlin engine -(:program:`senlin-engine`) is started. In future, Senlin will allow users to -provide additional policy type implementations as plug-ins to be loaded -dynamically. - -A policy type implementation dictates which fields are required, which fields -are optional and sometimes the constraints on field values. When a -:term:`Policy` is created by referencing this policy type, the fields are -assigned with concrete values. For example, a policy type -``senlin.policy.deletion`` conceptually specifies the properties required:: - - criteria: String # valid values - OLDEST_FIRST, YOUNGEST_FIRST, RANDOM - destroy_after_deletion: Boolean - grace_period: Integer - reduce_desired_capacity: Boolean - -The specification of a policy object of this policy type may look like -following:: - - type: senlin.policy.deletion - version: 1.0 - properties: - criteria: OLDEST_FIRST - destroy_after_deletion: True - grace_period: 120 - reduce_desired_capacity: True - - -Listing Policy Types -~~~~~~~~~~~~~~~~~~~~ - -Senlin server comes with some built-in policy types. 
You can check the list -of policy types using the following command:: - - $ openstack cluster policy type list - +--------------------------------+---------+----------------------------+ - | name | version | support_status | - +--------------------------------+---------+----------------------------+ - | senlin.policy.affinity | 1.0 | SUPPORTED since 2016.10 | - | senlin.policy.batch | 1.0 | EXPERIMENTAL since 2017.02 | - | senlin.policy.deletion | 1.0 | SUPPORTED since 2016.04 | - | senlin.policy.deletion | 1.1 | SUPPORTED since 2018.01 | - | senlin.policy.health | 1.0 | EXPERIMENTAL since 2017.02 | - | senlin.policy.loadbalance | 1.0 | SUPPORTED since 2016.04 | - | senlin.policy.loadbalance | 1.1 | SUPPORTED since 2018.01 | - | senlin.policy.region_placement | 1.0 | EXPERIMENTAL since 2016.04 | - | | | SUPPORTED since 2016.10 | - | senlin.policy.scaling | 1.0 | SUPPORTED since 2016.04 | - | senlin.policy.zone_placement | 1.0 | EXPERIMENTAL since 2016.04 | - | | | SUPPORTED since 2016.10 | - +--------------------------------+---------+----------------------------+ - - -The output is a list of policy types supported by the Senlin server. - - -Showing Policy Details -~~~~~~~~~~~~~~~~~~~~~~ - -Each :term:`Policy Type` has a schema for its *spec* (i.e. specification) -that describes the names and types of the properties that can be accepted. To -show the schema of a specific policy type along with other properties, you can -use the following command:: - - $ openstack cluster policy type show senlin.policy.deletion-1.1 - support_status: - '1.0': - - since: '2016.04' - status: SUPPORTED - '1.1': - - since: '2018.01' - status: SUPPORTED - id: senlin.policy.deletion-1.1 - location: null - name: senlin.policy.deletion-1.1 - schema: - criteria: - constraints: - - constraint: - - OLDEST_FIRST - - OLDEST_PROFILE_FIRST - - YOUNGEST_FIRST - - RANDOM - type: AllowedValues - default: RANDOM - description: Criteria used in selecting candidates for deletion - required: false - type: String - updatable: false - destroy_after_deletion: - default: true - description: Whether a node should be completely destroyed after deletion. Default - to True - required: false - type: Boolean - updatable: false - grace_period: - default: 0 - description: Number of seconds before real deletion happens. - required: false - type: Integer - updatable: false - hooks: - default: {} - description: Lifecycle hook properties - required: false - schema: - params: - default: {} - required: false - schema: - queue: - default: '' - description: Zaqar queue to receive lifecycle hook message - required: false - type: String - updatable: false - url: - default: '' - description: Url sink to which to send lifecycle hook message - required: false - type: String - updatable: false - type: Map - updatable: false - timeout: - default: 0 - description: Number of seconds before actual deletion happens. - required: false - type: Integer - updatable: false - type: - constraints: - - constraint: - - zaqar - - webhook - type: AllowedValues - default: zaqar - description: Type of lifecycle hook - required: false - type: String - updatable: false - type: Map - updatable: false - reduce_desired_capacity: - default: true - description: Whether the desired capacity of the cluster should be reduced along - the deletion. Default to True. 
- required: false - type: Boolean - updatable: false - -Here, each property has the following attributes: - -- ``default``: the default value for a property when not explicitly specified; -- ``description``: a textual description of the use of a property; -- ``required``: whether the property must be specified. Such kind of a - property usually doesn't have a ``default`` value; -- ``type``: one of ``String``, ``Integer``, ``Boolean``, ``Map`` or ``List``; -- ``updatable``: a boolean indicating whether a property is updatable. - -The default output from the :command:`policy-type-show` command is in YAML -format. You can choose to show the spec schema in JSON format by specifying -the :option:`-f json` option as shown below:: - - $ openstack cluster policy type show -f json senlin.policy.deletion-1.0 - -For information on how to manage the relationship between a policy and a -cluster, please refer to :ref:`ref-bindings`. - - -See Also -~~~~~~~~ - -Check the list below for documents related to the creation and usage of -:term:`Policy` objects. - -* :doc:`Creating Your Own Policy Objects ` -* :doc:`Managing the Binding between Cluster and Policy ` -* :doc:`Examining Actions ` -* :doc:`Browsing Events ` diff --git a/doc/source/user/policy_types/affinity.rst b/doc/source/user/policy_types/affinity.rst deleted file mode 100644 index a860e4ef0..000000000 --- a/doc/source/user/policy_types/affinity.rst +++ /dev/null @@ -1,134 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _ref-affinity-policy: - -=============== -Affinity Policy -=============== - -The affinity policy is designed for senlin to leverage the *server group* API -in nova. Using this policy, you can specify whether the nodes in a cluster -should be collocated on the same physical machine (aka. "affinity") or they -should be spread onto as many physical machines as possible (aka. -"anti-affinity"). - -Currently, this policy can be used on nova server clusters only. In other -words, the type name of the cluster's profile has to be ``os.nova.server``. - -Properties -~~~~~~~~~~ - -.. schemaprops:: - :package: senlin.policies.affinity_policy.AffinityPolicy - -Sample -~~~~~~ - -A typical spec for an affinity policy looks like the following example: - -.. literalinclude :: /../../examples/policies/affinity_policy.yaml - :language: yaml - -The affinity policy has the following properties: - -- ``servergroup.name``: An optional string that will be used as the name of - server group to be created. -- ``servergroup.policies``: A string indicating the policy to be used for - the server group. -- ``availability_zone``: Optional string specifying the availability zone for - the nodes to launch from. -- ``enable_drs_extension``: A boolean indicating whether VMware vSphere - extension should be enabled. - - -Validation -~~~~~~~~~~ - -When creating an affinity policy, the Senlin engine checks if the provided spec -is valid: - -- The value for ``servergroup.policies`` must be one of "``affinity``" or - "``anti-affinity``". 
The default value is "``affinity``" if omitted.
-
-- The value of ``availability_zone`` is the name of an availability zone known
-  to the Nova compute service.
-
-
-Server Group Name
-~~~~~~~~~~~~~~~~~
-
-Since the ``os.nova.server`` profile type may contain ``scheduler_hints``
-which has a server group specified, the affinity policy behaves differently
-depending on the settings.
-
-If the profile used by a cluster contains a ``scheduler_hints`` property (as
-shown in the example), the Senlin engine checks if the specified group name
-("``group_135``" in this case) is actually known to the Nova compute service
-as a valid server group. The server group name from the profile spec takes
-precedence over the ``servergroup.name`` value in the policy spec.
-
-.. code-block:: yaml
-
-   type: os.nova.server
-   version: 1.0
-   properties:
-     flavor: m1.small
-     ...
-     scheduler_hints:
-       group: group_135
-
-If the ``group`` value is found to be a valid server group name, the Senlin
-engine compares the policies specified for that Nova server group with those
-specified in the affinity policy spec. If the policies don't match, the
-affinity policy cannot be attached to the cluster.
-
-If the profile spec doesn't contain a ``scheduler_hints`` property, or the
-``scheduler_hints`` property doesn't have a ``group`` value, the Senlin engine
-uses the ``servergroup.name`` value from the affinity policy spec, if
-provided. If the policy spec also fails to provide a group name, the Senlin
-engine will try to create a server group with a random name, e.g.
-"``server_group_x2mde78a``". The newly created server group will be deleted
-automatically when you detach the affinity policy from the cluster.
-
-
-Availability Zone Name
-~~~~~~~~~~~~~~~~~~~~~~
-
-The spec property ``availability_zone`` is optional, regardless of whether
-``enable_drs_extension`` is specified or what value it is assigned.
-However, if the ``availability_zone`` property does have a value, it will have
-an impact on the placement of newly created nodes. This subsection discusses
-the cases when the DRS extension is not enabled.
-
-When the DRS extension is not enabled and the ``availability_zone`` property
-doesn't have a value, the Senlin engine won't assign an availability zone for
-newly created nodes.
-
-By contrast, if the ``availability_zone`` property does have a value and it
-has been validated to be the name of an availability zone known to Nova, all
-newly created nodes will be created in the specified availability zone.
-
-
-DRS Extension
-~~~~~~~~~~~~~
-
-The property ``enable_drs_extension`` tells the Senlin engine that the
-affinity will be enforced by the VMware vSphere extension. In this case, the
-value of the ``availability_zone`` property will be used to search for a
-suitable hypervisor to which new nodes are scheduled.
-
-All newly created nodes in the cluster, when an affinity policy is attached
-and enabled, will be scheduled to an availability zone named
-``<availability_zone>:<host>``, where ``<availability_zone>`` is the value of
-the ``availability_zone`` property and ``<host>`` is the hostname of a
-selected DRS hypervisor.
diff --git a/doc/source/user/policy_types/batch.rst b/doc/source/user/policy_types/batch.rst
deleted file mode 100644
index a7bd5d1d2..000000000
--- a/doc/source/user/policy_types/batch.rst
+++ /dev/null
@@ -1,52 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-batch-policy:
-
-============
-Batch Policy
-============
-
-The batch policy is designed to automatically group a large number of
-operations into smaller batches so that service interruption can be better
-managed and there won't be a flood of service requests sent to other
-services, which would effectively form a DoS (denial-of-service) attack.
-
-Currently, this policy is applicable to clusters of all profile types and it
-is enforced when the cluster is updated. The development team is still looking
-for an elegant solution that can regulate resource creation requests.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.batch_policy.BatchPolicy
-
-Sample
-~~~~~~
-
-Below is a typical spec for a batch policy:
-
-.. literalinclude :: /../../examples/policies/batch_policy.yaml
-  :language: yaml
-
-The ``min_in_service`` property specifies the minimum number of nodes to be
-kept in ACTIVE status. This is mainly for cluster update use cases. The
-other property, ``max_batch_size``, specifies the number of nodes to be
-updated in each batch. This property is mainly used to ensure that batch
-requests are still within the processing capability of a backend service.
-
-Between each batch of service requests, you can specify an interval, in
-seconds, using the ``pause_time`` property. This can be used to ensure
-that updated nodes are fully active to provide services, for example.
diff --git a/doc/source/user/policy_types/deletion.rst b/doc/source/user/policy_types/deletion.rst
deleted file mode 100644
index e9db256ed..000000000
--- a/doc/source/user/policy_types/deletion.rst
+++ /dev/null
@@ -1,183 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-deletion-policy:
-
-===============
-Deletion Policy
-===============
-
-The deletion policy is provided to help users control the election of victim
-nodes when a cluster is about to be shrunk. In other words, when the size of
-a cluster is to be decreased, which node(s) should be removed first.
-
-Currently, this policy is applicable to clusters of all profile types and it
-is enforced when the cluster's size is about to be reduced.
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.deletion_policy.DeletionPolicy
-
-Sample
-~~~~~~
-
-Below is a typical spec for a deletion policy:
-
-.. literalinclude :: /../../examples/policies/deletion_policy.yaml
-  :language: yaml
-
-The valid values for the ``criteria`` property include:
-
-- ``OLDEST_FIRST``: always select node(s) that were created earlier than
-  other nodes.
-
-- ``YOUNGEST_FIRST``: always select node(s) that were created most recently
-  rather than those created earlier.
-
-- ``OLDEST_PROFILE_FIRST``: compare the profile used by each individual node
-  and select the node(s) whose profile(s) were created earlier than others.
-
-- ``RANDOM``: randomly select node(s) from the cluster for deletion. This is
-  the default criterion if omitted.
-
-.. NOTE::
-
-  There is an implicit rule (criterion) when electing victim nodes. The
-  Senlin engine always ranks nodes that are not in ACTIVE state, or that are
-  marked as tainted, ahead of others.
-
-Several actions can trigger a deletion policy. Some of
-them may already carry a list of candidates to remove, e.g.
-``CLUSTER_DEL_NODES`` or ``NODE_DELETE``; others may only carry a number of
-nodes to remove, e.g. ``CLUSTER_SCALE_IN`` or ``CLUSTER_RESIZE``. For actions
-that already have a list of candidates, the deletion policy will respect the
-action inputs. The election of victims only happens when no such candidates
-have been identified.
-
-
-Deletion vs Destroy
-~~~~~~~~~~~~~~~~~~~
-
-There are cases where you don't want the node(s) removed from a cluster to be
-destroyed. Instead, you prefer them to become "orphan" nodes so that in the
-future you can quickly add them back to the cluster without having to create
-new nodes.
-
-If this is your situation, you may want to set ``destroy_after_deletion`` to
-``false``. The Senlin engine won't destroy the node(s) after removing them
-from the cluster.
-
-The default behavior is to delete (destroy) the node(s) after they are
-deprived of their cluster membership.
-
-
-Grace Period
-~~~~~~~~~~~~
-
-Another common scenario is to grant a node a period of time for it to shut
-down gracefully. Even if a node doesn't have built-in logic to perform a
-graceful shutdown, granting it some extra time may still help ensure that the
-resources it was using have been properly released.
-
-The default value for the ``grace_period`` property is 0, which means the node
-deletion happens as soon as it is removed from the cluster. You can customize
-this value according to your needs. Note that the grace period will be granted
-to all node(s) deleted. When setting this value to a large number, be sure
-it will not exceed the typical timeout value for action execution; otherwise,
-the node deletion will fail.
-
-
-Reduce Desired Capacity or Not
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In most cases, users would anticipate the "desired_capacity" of a cluster to
-be reduced when nodes are removed from it. Since the victim selection
-algorithm always picks nodes in non-ACTIVE status over ACTIVE ones, you can
-actually remove erroneous nodes by taking advantage of this rule.
-
-For example, suppose there are 4 nodes in a cluster and 2 of them are known to
-be inactive. You can use the command :command:`openstack cluster members
-del` to remove the bad nodes. If you have a deletion policy attached to the
-cluster, you get a chance to tell the Senlin engine that you don't want to
-change the capacity of the cluster. Instead, you only want the bad nodes
-removed. With the help of other cluster-health-related commands, you can
-quickly recover the cluster to a healthy status. You don't have to change the
-desired capacity of the cluster to a smaller value and then change it back.
-
-If this is your use case, you can set ``reduce_desired_capacity`` to ``false``
-in the policy spec. The cluster's desired capacity won't be changed after
-cluster membership is modified.
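-
-Putting the properties above together, a policy spec for this use case might
-look like the following sketch (values are illustrative, not prescriptive)::
-
-  type: senlin.policy.deletion
-  version: 1.0
-  properties:
-    # non-ACTIVE and tainted nodes are ranked first, then the oldest ones
-    criteria: OLDEST_FIRST
-    # destroy nodes once they are removed from the cluster
-    destroy_after_deletion: true
-    # give each node 2 minutes before the actual deletion happens
-    grace_period: 120
-    # keep the desired capacity unchanged when removing bad nodes
-    reduce_desired_capacity: false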
-
-
-Lifecycle Hook
-~~~~~~~~~~~~~~
-
-If there is a need to receive notification of a node deletion, you can
-specify a lifecycle hook in the deletion policy:
-
-.. code-block:: yaml
-
-   type: senlin.policy.deletion
-   version: 1.1
-   properties:
-     hooks:
-       type: 'zaqar'
-       timeout: 120
-       params:
-         queue: 'my_queue'
-
-The valid values for the ``type`` property are:
-
-- ``zaqar``: send a message to a Zaqar queue. The name of the Zaqar queue must
-  be specified in the ``queue`` property.
-
-- ``webhook``: send a message to a webhook URL. The URL of the webhook must be
-  specified in the ``url`` property.
-
-The ``timeout`` property specifies the number of seconds to wait before the
-actual node deletion happens. This timeout can be preempted by calling the
-complete lifecycle hook API.
-
-.. NOTE::
-
-  Hooks of type ``webhook`` will be supported in a future version. Currently
-  only hooks of type ``zaqar`` are supported.
-
-
-Deleting Nodes Across Regions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-With the help of :ref:`ref-region-policy`, you will be able to distribute
-a cluster's nodes into different regions as instructed. However, when you are
-removing nodes from more than one region, the same distribution rule has to
-be respected as well.
-
-When there is a region placement policy in effect, the deletion policy will
-first determine the number of nodes to be removed from each region. Then in
-each region, the policy performs a victim election based on the criteria you
-specified in the policy spec.
-
-
-Deleting Nodes Across Availability Zones
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Similarly, when there is a zone placement policy attached to the cluster in
-question, nodes in the cluster may get distributed across a few availability
-zones based on a preset algorithm.
-
-The deletion policy, when triggered, will first determine the number of nodes
-to be removed from each availability zone. Then it proceeds to elect victim
-nodes based on the criteria specified in the policy spec within each
-availability zone.
diff --git a/doc/source/user/policy_types/health.rst b/doc/source/user/policy_types/health.rst
deleted file mode 100644
index e16e743ab..000000000
--- a/doc/source/user/policy_types/health.rst
+++ /dev/null
@@ -1,128 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-health-policy:
-
-=============
-Health Policy
-=============
-
-The health policy is designed for Senlin to detect cluster node failures and
-to recover them in a way customizable by users. The health policy is not
-meant to be a universal solution that can solve all problems related to
-high availability. However, the ultimate goal for the development team is to
-provide an auto-healing framework that is usable, flexible, and extensible
-for most deployment scenarios.
-
-The policy type is currently applicable to clusters whose profile type is one
-of ``os.nova.server`` or ``os.heat.stack``. This could be extended in the
-future.
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.health_policy.HealthPolicy
-
-Sample
-~~~~~~
-
-A typical spec for a health policy looks like the following example:
-
-.. literalinclude :: /../../examples/policies/health_policy_poll.yaml
-  :language: yaml
-
-There are two groups of properties (``detection`` and ``recovery``), which
-provide information related to the failure detection and failure recovery
-aspects, respectively.
-
-For failure detection, you can specify a detection mode that can be one of
-the following values:
-
-- ``NODE_STATUS_POLLING``: The Senlin engine (more specifically, the health
-  manager service) is expected to poll each and every node periodically to
-  find out if they are "alive" or not.
-
-- ``NODE_STATUS_POLL_URL``: The Senlin engine (more specifically, the health
-  manager service) is expected to poll the specified URL periodically to
-  find out if a node is considered healthy or not.
-
-- ``LIFECYCLE_EVENTS``: Many services can emit notification messages on the
-  message queue when configured. The Senlin engine is expected to listen to
-  these events and react to them appropriately.
-
-It is possible to combine ``NODE_STATUS_POLLING`` and ``NODE_STATUS_POLL_URL``
-detections by specifying multiple detection modes. In the case of multiple
-detection modes, the Senlin engine tries each detection type in the order
-specified. The behavior of a failed health check in the case of multiple
-detection modes is specified using ``recovery_conditional``.
-
-``LIFECYCLE_EVENTS`` cannot be combined with any other detection type.
-
-All detection types can carry an optional map of ``options``. When the
-detection type is set to "``NODE_STATUS_POLL_URL``", for example, you can
-specify a value for the ``poll_url`` property to specify the URL to be used
-for health checking.
-
-As the policy type implementation stabilizes, more options may be added later.
-
-For failure recovery, there are currently two properties: ``actions`` and
-``fencing``. The ``actions`` property takes a list of action names and an
-optional map of parameters specific to that action. For example, the
-``REBOOT`` action can be accompanied by a ``type`` parameter that indicates
-if the intended reboot operation is a soft reboot or a hard reboot.
-
-.. note::
-
-   The plan for recovery actions is to support a list of actions which can be
-   tried one by one by the Senlin engine. Currently, you can specify only
-   *one* action due to an implementation limitation.
-
-   Another extension to the recovery action is to add triggers to
-   user-provided workflows. This is also under development.
-
-
-Validation
-~~~~~~~~~~
-
-Due to an implementation limitation, currently you can only specify *one*
-action for the ``recovery.actions`` property. This constraint will be removed
-soon after support for action lists is completed.
-
-
-Fencing
-~~~~~~~
-
-Fencing may be an important step during a reliable node recovery process.
-Without fencing, we cannot ensure that the compute, network and/or storage
-resources are in a consistent, predictable status. However, fencing is very
-difficult because it always involves an out-of-band operation to the resource
-controller, for example, an IPMI command to power off a physical host sent to
-a specific IP address.
-
-Currently, the health policy only supports the fencing of virtual machines by
-forcibly deleting them before taking measures to recover them.
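-
-As a rough illustration of how the pieces above fit together, a spec for a
-polling-based health policy might be sketched as follows. The exact nesting
-of the ``detection`` and ``recovery`` groups should be verified against the
-schema reported by :command:`openstack cluster policy type show
-senlin.policy.health-1.1`; the ``detection_modes`` and ``interval`` names in
-particular are assumptions here:
-
-.. code-block:: yaml
-
-   type: senlin.policy.health
-   version: 1.1
-   properties:
-     detection:
-       # how often to run the health check, in seconds (name assumed)
-       interval: 120
-       detection_modes:
-         - type: NODE_STATUS_POLLING
-     recovery:
-       actions:
-         # only one action is currently supported, as noted above
-         - name: REBOOT
-           params:
-             # soft vs. hard reboot; value illustrative
-             type: SOFT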
- - -Snapshots -~~~~~~~~~ - -There have been some requirements to take snapshots of a node before recovery -so that the recovered node(s) will resume from where they failed. This feature -is also on the TODO list for the development team. - - -References -~~~~~~~~~~ - -For more detailed information on how the health policy work, please check -:doc:`Health Policy V1.1 <../../contributor/policies/health_v1>` \ No newline at end of file diff --git a/doc/source/user/policy_types/load_balancing.rst b/doc/source/user/policy_types/load_balancing.rst deleted file mode 100644 index 68bc76a48..000000000 --- a/doc/source/user/policy_types/load_balancing.rst +++ /dev/null @@ -1,295 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _ref-lb-policy: - -===================== -Load-Balancing Policy -===================== - -The load-balancing policy is an encapsulation of the LBaaS v2 service that -distributes the network load evenly among members in a pool. Users are in -general not interested in the implementation details although they have a -strong requirement of the features provided by a load-balancer, such as -load-balancing, health-monitoring etc. - -The load-balancing policy is designed to be applicable to a cluster of virtual -machines or some variants or extensions of basic virtual machines. Currently, -Senlin only supports the load balancing for Nova servers. Future revisions may -extend this to more types of clusters. - -Before using this policy, you will have to make sure the LBaaS v2 service is -installed and configured properly. - - -Properties -~~~~~~~~~~ - -.. schemaprops:: - :package: senlin.policies.lb_policy.LoadBalancingPolicy - -Sample -~~~~~~ - -The design of the load-balancing policy faithfully follows the interface and -properties exposed by the LBaaS v2 service. A sample spec is shown below: - -.. literalinclude :: /../../examples/policies/lb_policy.yaml - :language: yaml - -As you can see, there are many properties related to the policy. The good news -is that for most of them, there are reasonable default values. All properties -are optional except for the following few: - -- ``vip.subnet`` or ``vip.network``: These properties provides the name or ID - of the subnet or network on which the virtual IP (VIP) is allocated. At least - one (or both) of them must be specified. - -The following subsections describe each and every group of properties and the -general rules on using them. - -Note that you can create and configure load-balancers all by yourself when you -have a good reason to do so. However, by using the load-balancing policy, you -no longer have to manage the load-balancer's lifecycle manually and you don't -have to update the load-balancer manually when cluster membership changes. - - -Load Balancer Pools -~~~~~~~~~~~~~~~~~~~ - -The load balancer pool is managed automatically when you have a load-balancing -policy attached to a cluster. The policy automatically adds existing nodes to -the load balancer pool when attaching the policy. 
Later on, when new nodes are
-added to the cluster (e.g. by cluster scaling) or existing nodes are removed
-from the cluster, the policy will update the load balancer pool to reflect
-the membership change.
-
-Each pool is supposed to use the same protocol and the same port number for
-load sharing. By default, the protocol (i.e. ``pool.protocol``) is set to
-"``HTTP``", which can be customized to "``HTTPS``" or "``TCP``" in your setup.
-The default port number is 80, which can also be modified to suit your service
-configuration.
-
-All nodes in a pool are supposed to reside on the same subnet, and the subnet
-specified in the ``pool.subnet`` property must be compatible with the subnets
-of existing nodes.
-
-The LBaaS service is capable of balancing the load among nodes in different
-ways, which are collectively specified by the ``lb_method`` property. Valid
-values for this property are:
-
-- ``ROUND_ROBIN``: The load balancer will select a node for workload handling
-  on a round-robin basis. Each node gets an equal share of the workload.
-
-- ``LEAST_CONNECTIONS``: The load balancer will choose a node based on the
-  number of established client connections. The node with the lowest
-  number of connections will be chosen.
-
-- ``SOURCE_IP``: The load balancer will compute hash values based on the IP
-  addresses of the clients and the server and then use the hash value for
-  routing. This ensures the requests from the same client always go to the
-  same server even in the face of broken connections.
-
-The ``pool.admin_state_up`` property can be safely ignored most of the time.
-It is useful only when you want to debug the details of a load-balancer.
-
-The last property that needs some attention is ``pool.session_persistence``,
-which is used to persist client sessions even if connections break now
-and then. There are three types of session persistence supported:
-
-- ``SOURCE_IP``: The load balancer will try to resume a broken connection
-  based on the client's IP address. You don't have to configure the
-  ``cookie_name`` property in this case.
-
-- ``HTTP_COOKIE``: The load balancer will check a named, general HTTP cookie
-  using the name specified in the ``cookie_name`` property and then resume the
-  connection based on the cookie contents.
-
-- ``APP_COOKIE``: The load balancer will check the application specific cookie
-  using the name specified in the ``cookie_name`` property and resume the
-  connection based on the cookie contents.
-
-
-Virtual IP
-~~~~~~~~~~
-
-The Virtual IP (or "VIP" for short) refers to the IP address visible from the
-client side. It is the single IP address used by all clients to access the
-application or service running on the pool nodes. You have to specify a value
-for either the ``vip.subnet`` or ``vip.network`` property even if you don't
-have a preference about the actual VIP allocated. However, if you do have a
-preferred VIP address to use, you will need to provide both a
-``vip.subnet``/``vip.network`` and a ``vip.address`` value.
-The LBaaS service will check if both values are valid.
-
-Note that if you choose to omit the ``vip.address`` property, the LBaaS
-service will allocate an address for you from either the provided subnet
-or a subnet automatically chosen from the provided network. You will
-have to check the cluster's ``data`` property after the load-balancing policy
-has been successfully attached to your cluster. For example:
-
-.. 
code-block:: console - - $ openstack cluster show my_cluster - - +------------------+------------------------------------------------+ - | Field | Value | - +------------------+------------------------------------------------+ - | created_at | 2017-01-21T06:25:42Z | - | data | { | - | | "loadbalancers": { | - | | "1040ad51-87e8-4579-873b-0f420aa0d273": { | - | | "vip_address": "11.22.33.44" | - | | } | - | | } | - | | } | - | dependents | {} | - | desired_capacity | 10 | - | domain_id | None | - | id | 30d7ef94-114f-4163-9120-412b78ba38bb | - | ... | ... | - -The output above shows you that the cluster has a load-balancer created for -you and the VIP used to access that cluster is "11.22.33.44". - -Similar to the pool properties discussed in previous subsection, for the -virtual IP address, you can also specify the expected network protocol and -port number to use where clients will be accessing it. The default value for -``vip.protocol`` is "``HTTP``" and the default port number is 80. Both can be -customized to suit your needs. - -Another useful feature provided by the LBaaS service is the cap of maximum -number of connections per second. This is a limit set on a per-VIP basis. By -default, Senlin sets the ``vip.connection_limit`` to -1 which means there is -no upper bound for connection numbers. You may want to customize this value -to restrict the number of connection requests per second for your service. - -The last property in the ``vip`` group is ``admin_state_up`` which is default -to "``True``". In some rare cases, you may want to set it to "``False``" for -the purpose of debugging. - - -Health Monitor -~~~~~~~~~~~~~~ - -Since a load-balancer sits in front of all nodes in a pool, it has to be aware -of the health status of all member nodes so as to properly and reliably route -client requests to the active nodes for processing. The problem is that there -are so many different applications or web services each exhibit a different -runtime behavior. It is hard to come up with an approach generic and powerful -enough to detect all kinds of node failures. - -The LBaaS that backs the Senlin load-balancing policy supports four types of -node failure detections, all generic enough to serve a wide range of -applications. - -- ``PING``: The load-balancer pings every pool members to detect if they are - still reachable. - -- ``TCP``: The load-balancer attempts a telnet connection to the protocol port - configured for the pool thus determines if a node is still alive. - -- ``HTTP``: The load-balancer attempts a HTTP request (specified in the - ``health_monitor.http_method`` property) to specific URL (configured in the - ``health_monitor.url_path`` property) and then determines if a node is still - active by comparing the result code to the expected value (configured in the - ``health_monitor.expected_codes``. - -- ``HTTPS``: The load-balancer checks nodes' aliveness by sending a HTTPS - request using the same values as those in the case of ``HTTP``. - -The ``health_monitor.expected_codes`` field accepts a string value, but you -can specify multiple HTTP status codes that can be treated as an indicator of -node's aliveness: - -- A single value, such as ``200``; - -- A list of values separated by commas, such as ``200, 202``; - -- A range of values, such as ``200-204``. - -To make the failure detection reliable, you may want to check and customize -the following properties in the ``health_monitor`` group. 
-
-- ``timeout``: The maximum time in milliseconds that a monitor waits for a
-  response from a node before it declares the node unreachable. The default is
-  5.
-
-- ``max_retries``: The number of allowed connection failures before the
-  monitor concludes that the node is inactive. The default is 3.
-
-- ``delay``: The time in milliseconds between sending two consecutive requests
-  (probes) to pool members. The default is 10.
-
-Careful experimentation is usually warranted to come up with reasonable
-values for these fields in a specific environment.
-
-
-LB Status Timeout
-~~~~~~~~~~~~~~~~~
-
-Due to the way the LBaaS service is implemented, creating load balancers and
-health monitors, and updating load balancer pools, all take considerable time.
-In some deployment scenarios, it may take the load balancer several minutes to
-become operative again after an update operation.
-
-The ``lb_status_timeout`` option is provided since version 1.1 of the
-load-balancing policy to mitigate this effect. In a real production
-environment, you are expected to set this value based on some careful
-dry-runs.
-
-
-Availability Zone
-~~~~~~~~~~~~~~~~~
-
-Load balancers have their own availability zones, similar to the compute
-service.
-
-The ``availability_zone`` option is provided since version 1.2 of the
-load-balancing policy, to allow the user to choose which availability zone to
-use when provisioning the load balancer.
-
-Validation
-~~~~~~~~~~
-
-When creating a new load-balancing policy object, Senlin checks if the subnet
-and/or network provided are actually known to the Neutron network service. If
-they are not, the policy creation will fail.
-
-
-Updates to the Cluster and Nodes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When a load-balancing policy has been successfully attached to a cluster, you
-can observe the VIP address from the ``data`` property of the cluster, as
-described above.
-
-You can also check the ``data`` property of nodes in the cluster. Each node
-will have a ``lb_member`` key in its data property indicating the ID of that
-node in the load-balancer pool.
-
-When the load-balancing policy is detached from a cluster successfully, these
-data are automatically removed, and the related resources created on the
-LBaaS side are deleted transparently.
-
-
-Node Deletion
-~~~~~~~~~~~~~
-
-In the case where there is a :ref:`ref-deletion-policy` attached to the same
-cluster, the deletion policy will elect the victims to be removed from a
-cluster before the load-balancing policy gets a chance to remove those nodes
-from the load-balancing pool.
-
-However, when there is no such deletion policy in place, the load-balancing
-policy will try to figure out the number of nodes to delete (if needed) and
-randomly choose the victim nodes for deletion.
diff --git a/doc/source/user/policy_types/region_placement.rst b/doc/source/user/policy_types/region_placement.rst
deleted file mode 100644
index 9b3095e6a..000000000
--- a/doc/source/user/policy_types/region_placement.rst
+++ /dev/null
@@ -1,92 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-region-policy:
-
-=======================
-Region Placement Policy
-=======================
-
-The region placement policy is designed to enable the deployment and
-management of resource pools across multiple regions. Note that the current
-design is only concerned with a single keystone endpoint for multiple regions;
-interaction with keystone federation is planned as a future extension.
-
-The policy is designed to work with clusters of any profile type.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.region_placement.RegionPlacementPolicy
-
-Sample
-~~~~~~
-
-A typical spec for a region placement policy is shown in the following sample:
-
-.. literalinclude :: /../../examples/policies/placement_region.yaml
-  :language: yaml
-
-In this sample spec, two regions are provided, namely "``region_1``" and
-"``region_2``". There are "weight" and "cap" attributes associated with them,
-both of which are optional.
-
-The "``weight``" value is to be interpreted as a relative number. The value
-assigned to one region has to be compared to those assigned to other regions
-for an assessment. In the sample shown above, ``region_1`` and ``region_2``
-are assigned weights of 100 and 200, respectively. This means that for
-every 3 nodes created, one is expected to be scheduled to ``region_1`` and
-the other 2 are expected to be scheduled to ``region_2``. Put another
-way, the chance of ``region_2`` receiving a node creation request is twice
-that of ``region_1``.
-
-The "``weight``" value has to be a positive integer, if specified. The default
-value is 100 for all regions whose weight is omitted.
-
-There are cases where regions have different amounts of resources
-provisioned, so their capacities for creating and running nodes differ. To
-deal with these situations, you can assign a "``cap``" value to such a region.
-This effectively tells the Senlin engine that a region is not supposed to
-accommodate more nodes than the specified number.
-
-
-Validation
-~~~~~~~~~~
-
-When creating a region placement policy, the Senlin engine validates whether
-the region names given are all known to the keystone identity service as
-available regions. Do NOT pass in an invalid region name and hope Senlin can
-create a region for you.
-
-Later on, when the policy is triggered by node creation or deletion, it always
-validates if the provided regions are still valid and usable.
-
-
-Node Distribution
-~~~~~~~~~~~~~~~~~
-
-After a region placement policy is attached to a cluster and enabled, all
-future node creations (by cluster scaling for example) will trigger an
-evaluation of the policy.
-
-The region placement policy will favor regions with the highest weight values
-when selecting a region for nodes to be created. It will guarantee that no
-more than the provided ``cap`` number of nodes will be allocated to a specific
-region.
-
-Node distribution is calculated not only when new nodes are created and added
-to a cluster; it is also calculated when existing nodes are to be removed from
-the cluster. The policy will strive to maintain a distribution close to the
-one computed from the weight distribution of all regions.
diff --git a/doc/source/user/policy_types/scaling.rst b/doc/source/user/policy_types/scaling.rst
deleted file mode 100644
index 0cbf8b6ce..000000000
--- a/doc/source/user/policy_types/scaling.rst
+++ /dev/null
@@ -1,158 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-scaling-policy:
-
-==============
-Scaling Policy
-==============
-
-The scaling policy is designed to supplement a cluster scaling request with
-more detailed arguments based on user-provided rules. This policy type is
-expected to be applicable to clusters of all profile types.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.scaling_policy.ScalingPolicy
-
-Sample
-~~~~~~
-
-A typical spec for a scaling policy is shown below:
-
-.. literalinclude :: /../../examples/policies/scaling_policy.yaml
-  :language: yaml
-
-You should pay special attention to the ``event`` property, whose valid
-values include "``CLUSTER_SCALE_IN``" and "``CLUSTER_SCALE_OUT``". One
-implication of this design is that you have to attach two policies to the
-same cluster if you want to control the scaling behavior both when you are
-expanding the cluster and when you are shrinking it. You cannot control the
-scaling behavior in both directions using the same policy.
-
-Senlin has carefully designed the built-in policy types so that, for scaling
-policies, you can attach more than one instance of the same policy type, but
-you may get an error when attempting to attach two policies of another
-type (say ``senlin.policy.deletion``) to the same cluster.
-
-The value of the ``event`` property indicates when the policy will be checked.
-A policy with ``event`` set to "``CLUSTER_SCALE_IN``" will be checked when and
-only when a corresponding action is triggered on the cluster. A policy with
-``event`` set to "``CLUSTER_SCALE_OUT``" will be checked when and only when
-a corresponding action is triggered. If the cluster is currently processing a
-scaling action, it will not accept another scaling action until the current
-action has been processed and cooldown has been observed.
-
-For both types of actions that can trigger the scaling policy, there are
-always three types of adjustments to choose from, as listed below. The type
-of adjustment determines the interpretation of the ``adjustment.number`` value.
-
-- ``EXACT_CAPACITY``: the value specified for ``adjustment.number`` means the
-  new capacity of the cluster, so it has to be a non-negative integer.
-
-- ``CHANGE_IN_CAPACITY``: the value specified for ``adjustment.number`` is the
-  number of nodes to be added or removed. This means the value has to be a
-  non-negative number as well.
-
-- ``CHANGE_IN_PERCENTAGE``: the value specified for ``adjustment.number`` will
-  be interpreted as the percentage of capacity changes. This value has to be
-  a non-negative floating-point value.
-
-For example, in the sample spec shown above, when a ``CLUSTER_SCALE_IN``
-request is received, the policy will remove 10% of the total number of nodes
-from the cluster.
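-
-For reference, a spec expressing that behavior could be written along the
-following lines (numbers are illustrative; ``min_step`` and ``best_effort``
-are explained in the subsections below)::
-
-  type: senlin.policy.scaling
-  version: 1.0
-  properties:
-    event: CLUSTER_SCALE_IN
-    adjustment:
-      # remove 10% of the current node count on each scale-in
-      type: CHANGE_IN_PERCENTAGE
-      number: 10
-      # remove at least one node even when 10% rounds below one
-      min_step: 1
-      # clip the decision instead of rejecting it when size
-      # constraints would otherwise be broken
-      best_effort: true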
-
-
-Dealing With Percentage
-~~~~~~~~~~~~~~~~~~~~~~~
-
-As stated above, when ``adjustment.type`` is set to ``CHANGE_IN_PERCENTAGE``,
-the value of ``adjustment.number`` can be a floating-point value, interpreted
-as a percentage of the current node count of the cluster.
-
-In many cases, the result of the calculation may be a floating-point value.
-For example, if the current capacity of a cluster is 5 and the
-``adjustment.number`` is set to 30%, the computed result will be 1.5. In this
-situation, the scaling policy rounds the number up to its adjacent integer,
-i.e. 2. If the ``event`` property has "``CLUSTER_SCALE_OUT``" as its value,
-the policy decision is to add 2 nodes to the cluster. If, on the other hand,
-the ``event`` is set to "``CLUSTER_SCALE_IN``", the policy decision is to
-remove 2 nodes from the cluster.
-
-There are other corner cases to consider as well. When the computed result is
-less than 0.1, for example, it becomes a question whether the Senlin engine
-should add (or remove) nodes. The property ``adjustment.min_step`` is designed
-to make this decision. After the policy has computed the result, it checks
-whether the result is less than the specified ``adjustment.min_step`` value,
-and uses the ``adjustment.min_step`` value if so.
-
-
-Best Effort Scaling
-~~~~~~~~~~~~~~~~~~~
-
-In many auto-scaling usage scenarios, the policy decision may break the size
-constraints set on the cluster. As an example, a cluster has its ``min_size``
-set to 5, ``max_size`` set to 10 and its current capacity is 7. If the policy
-decision is to remove 3 nodes from the cluster, we are in a dilemma. Removing
-3 nodes will change the cluster capacity to 4, which is not allowed by the
-cluster. If we don't remove 3 nodes, we are not respecting the policy
-decision.
-
-The ``adjustment.best_effort`` property is designed to mitigate this
-situation. When it is set to False, the scaling policy will strictly conform
-to the rules set. It will reject the scaling request if the computed cluster
-capacity will break its size constraints. However, if
-``adjustment.best_effort`` is set to True, the scaling policy will strive to
-compute a sub-optimal number which will not break the cluster's size
-constraints. In the above example, this means the policy decision will be
-"remove 2 nodes from the cluster". In other words, the policy will at least
-try to partially fulfill the scaling goal while respecting the size
-constraint.
-
-
-Cooldown
-~~~~~~~~
-
-In real-life cluster deployments, workload pressure fluctuates rapidly. During
-one minute, it may look like there is a need to add 10 more nodes to handle a
-bursting workload. During the next minute, that may turn out to be a false
-alarm: the workload is rapidly decreasing. Since it is very difficult to
-accurately predict workload changes, if possible at all, an auto-scaling
-engine is not supposed to react prematurely to workload fluctuations.
-
-The ``cooldown`` property gives you a chance to specify an interval during
-which the cluster will ignore scaling requests. Setting a large value for
-this property will lead to a stable cluster, but the responsiveness to urgent
-situations is also sacrificed. Setting a small value, on the contrary, can
-meet the responsiveness requirement, but will also render the cluster into a
-thrashing state where new nodes are created very frequently only to be
-removed shortly afterwards.
-
-There is never a recommended value that suits all deployments. You will have
-to try different values in your own environment and tune them for different
-applications or services.
-
-
-Interaction with Other Policies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The scaling policy is only tasked with deciding the number of nodes to add or
-remove. For newly added nodes, you will use other policies to determine where
-they should be scheduled. For nodes to be deleted, you will use other policies
-(e.g. the deletion policy) to elect the victim nodes.
-
-The built-in policies were designed carefully so that they can work happily
-together or by themselves.
-
diff --git a/doc/source/user/policy_types/zone_placement.rst b/doc/source/user/policy_types/zone_placement.rst
deleted file mode 100644
index e80230f0d..000000000
--- a/doc/source/user/policy_types/zone_placement.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-zone-policy:
-
-=====================
-Zone Placement Policy
-=====================
-
-The zone placement policy is designed to enable the deployment and management
-of resource pools across multiple availability zones. Note that the current
-design is only concerned with the availability zones configured in the Nova
-compute service. Support for Cinder availability zones and Neutron
-availability zones may be added in the future when we have
-volume-storage-specific or network-specific profile types.
-
-The current implementation of the zone placement policy works with clusters of
-Nova virtual machines only.
-
-
-Properties
-~~~~~~~~~~
-
-.. schemaprops::
-  :package: senlin.policies.zone_placement.ZonePlacementPolicy
-
-Sample
-~~~~~~
-
-A typical spec for a zone placement policy is exemplified in the following
-sample:
-
-.. literalinclude :: /../../examples/policies/placement_zone.yaml
-  :language: yaml
-
-In this sample spec, two availability zones are provided, namely "``az_1``"
-and "``az_2``". Each availability zone can have an optional "``weight``"
-attribute associated with it.
-
-The "``weight``" value is to be interpreted as a relative number. The value
-assigned to one zone has to be compared to those assigned to other zones for
-an assessment. In the sample shown above, ``az_1`` and ``az_2`` are assigned
-weights of 100 and 200, respectively. This means that for every 3 nodes
-created, one is expected to be scheduled to ``az_1`` and the other 2 are
-expected to be scheduled to ``az_2``. In other words, the chance of ``az_2``
-receiving a node creation request is twice that of ``az_1``.
-
-The "``weight``" value has to be a positive integer, if specified. The default
-value is 100 for all zones whose weight is omitted.
-
-
-Validation
-~~~~~~~~~~
-
-When creating a zone placement policy, the Senlin engine validates whether
-the given zone names are all known to the Nova compute service as usable
-availability zones. Do NOT pass in an invalid availability zone name and hope
-Senlin can create a zone for you.
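-
-One way to avoid such validation failures is to first check which zones the
-Nova compute service knows about, for example (zone names below are
-illustrative)::
-
-  $ openstack availability zone list --compute
-  +-----------+-------------+
-  | Zone Name | Zone Status |
-  +-----------+-------------+
-  | az_1      | available   |
-  | az_2      | available   |
-  +-----------+-------------+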
- -Later on when the zone placement policy is triggered upon node creation or node -deletion actions, it always validates if the provided availability zones are -still valid and usable. - - -Node Distribution -~~~~~~~~~~~~~~~~~ - -After a zone placement policy is attached to a cluster and enabled, all future -node creations (by cluster scaling for example) will trigger an evaluation of -the policy. Similarly, a node deletion action will also trigger an evaluation -of it because the policy's goal is to maintain the node distribution based on -the one computed from the weight distribution of all zones. - -The zone placement policy will favor availability zones with highest weight -values when selecting a zone for nodes to be created. diff --git a/doc/source/user/profile_types.rst b/doc/source/user/profile_types.rst deleted file mode 100644 index 78fe510c7..000000000 --- a/doc/source/user/profile_types.rst +++ /dev/null @@ -1,225 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _ref-profile-types: - -============= -Profile Types -============= - -Concept -~~~~~~~ - -A :term:`Profile Type` can be treated as the meta-type of a :term:`Profile` -object. A registry of profile types is built in memory when Senlin engine -(:program:`senlin-engine`) is started. In future, Senlin will allow users to -provide additional profile type implementations as plug-ins to be loaded -dynamically. - -A profile type implementation dictates which fields are required. When a -profile is created by referencing this profile type, the fields are assigned -with concrete values. For example, a profile type can be ``os.heat.stack`` -that conceptually specifies the properties required: - -:: - - context: Map - template: Map - parameters: Map - files: Map - timeout: Integer - disable_rollback: Boolean - environment: Map - -A profile of type ``os.heat.stack`` may look like: - -:: - - # a spec for os.heat.stack - type: os.heat.stack - version: 1.0 - properties: - context: - region_name: RegionOne - template: - heat_template_version: 2014-10-16 - parameters: - length: Integer - resources: - rand: - type: OS::Heat::RandomString - properties: - len: {get_param: length} - outputs: - rand_val: - value: {get_attr: [rand, value]} - parameters: - length: 32 - files: {} - timeout: 60 - disable_rollback: True - environment: {} - - -Listing Profile Types -~~~~~~~~~~~~~~~~~~~~~ - -Senlin server comes with some built-in profile types. You can check the list -of profile types using the following command:: - - $ openstack cluster profile type list - +----------------------------+---------+----------------------------+ - | name | version | support_status | - +----------------------------+---------+----------------------------+ - | container.dockerinc.docker | 1.0 | EXPERIMENTAL since 2017.02 | - | os.heat.stack | 1.0 | SUPPORTED since 2016.04 | - | os.nova.server | 1.0 | SUPPORTED since 2016.04 | - +----------------------------+---------+----------------------------+ - -The output is a list of profile types supported by the Senlin server. 
- - -Showing Profile Details -~~~~~~~~~~~~~~~~~~~~~~~ - -Each :term:`Profile Type` has a schema for its *spec* (i.e. specification) -that describes the names and the types of properties that can be accepted. To -show the schema of a specific profile type along with other properties, you -can use the following command:: - - $ openstack cluster profile type show os.heat.stack-1.0 - support_status: - '1.0': - - since: '2016.04' - status: SUPPORTED - id: os.heat.stack-1.0 - location: null - name: os.heat.stack - schema: - context: - default: {} - description: A dictionary for specifying the customized context for - stack operations - required: false - type: Map - updatable: false - disable_rollback: - default: true - description: A boolean specifying whether a stack operation can be - rolled back. - required: false - type: Boolean - updatable: true - <... omitted ...> - timeout: - description: A integer that specifies the number of minutes that a - stack operation times out. - required: false - type: Integer - updatable: true - -Here, each property has the following attributes: - -- ``default``: the default value for a property when not explicitly specified; -- ``description``: a textual description of the use of a property; -- ``required``: whether the property must be specified. Such kind of a - property usually doesn't have a ``default`` value; -- ``type``: one of ``String``, ``Integer``, ``Boolean``, ``Map`` or ``List``; -- ``updatable``: a boolean indicating whether a property is updatable. - -The default output from the :command:`openstack cluster profile type show` -command is in YAML format. You can choose to show the spec schema in JSON -format by specifying the :option:`-f json` option as exemplified below:: - - $ openstack cluster profile type show -f json os.heat.stack-1.0 - { - "support_status": { - "1.0": [ - { - "status": "SUPPORTED", - "since": "2016.04" - } - ] - }, - "name": "os.heat.stack", - "schema": { - "files": { - "default": {}, - "required": false, - "type": "Map", - "description": "Contents of files referenced by the template, if any.", - "updatable": true - }, - <... omitted ...> - "context": { - "default": {}, - "required": false, - "type": "Map", - "description": "A dictionary for specifying the customized context for stack operations", - "updatable": false - } - }, - "id": "os.heat.stack-1.0", - "location": null - } - - -Showing Profile Type Operations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Each :term:`Profile Type` has built-in operations, you can get the operations -of a profile type using the following command:: - - $ openstack cluster profile type ops os.heat.stack-1.0 - operations: - abandon: - description: Abandon a heat stack node. - required: false - type: Map - updatable: false - -Here, each property has the following attributes: - -- ``description``: a textual description of the use of a property; -- ``required``: whether the property must be specified. Such kind of a - property usually doesn't have a ``default`` value; -- ``type``: one of ``String``, ``Integer``, ``Boolean``, ``Map`` or ``List``; -- ``updatable``: a boolean indicating whether a property is updatable. - -The default output from the :command:`openstack cluster profile type ops` -command is in YAML format. 
You can choose to show the spec schema in JSON -format by specifying the :option:`-f json` option as exemplified below:: - - $ openstack cluster profile type ops -f json os.heat.stack-1.0 - { - "operations": { - "abandon": { - "required": false, - "type": "Map", - "description": "Abandon a heat stack node.", - "updatable": false - } - } - } - - -See Also -~~~~~~~~ - -Below is a list of links to the documents related to profile types: - -* :doc:`Managing Profile Objects ` -* :doc:`Creating and Managing Clusters ` -* :doc:`Creating and Managing Nodes ` -* :doc:`Managing Cluster Membership ` -* :doc:`Browsing Events ` diff --git a/doc/source/user/profile_types/docker.rst b/doc/source/user/profile_types/docker.rst deleted file mode 100644 index dc0ba2b05..000000000 --- a/doc/source/user/profile_types/docker.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _ref-docker-profile: - -============== -Docker Profile -============== - -The docker profile instantiates nodes that are associated with docker container -instances. - -Properties -~~~~~~~~~~ - -.. schemaprops:: - :package: senlin.profiles.container.docker.DockerProfile - -Sample -~~~~~~ - -Below is a typical spec for a docker profile: - -.. literalinclude :: /../../examples/profiles/docker_container/docker_basic.yaml - :language: yaml \ No newline at end of file diff --git a/doc/source/user/profile_types/nova.rst b/doc/source/user/profile_types/nova.rst deleted file mode 100644 index e9c6b5f78..000000000 --- a/doc/source/user/profile_types/nova.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _ref-nova-profile: - -============ -Nova Profile -============ - -The nova profile instantiates nodes that are associated with nova server -instances. - -Properties -~~~~~~~~~~ - -.. schemaprops:: - :package: senlin.profiles.os.nova.server.ServerProfile - -Sample -~~~~~~ - -Below is a typical spec for a nova profile: - -.. literalinclude :: /../../examples/profiles/nova_server/cirros_basic.yaml - :language: yaml \ No newline at end of file diff --git a/doc/source/user/profile_types/stack.rst b/doc/source/user/profile_types/stack.rst deleted file mode 100644 index 2fd1b721c..000000000 --- a/doc/source/user/profile_types/stack.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _ref-stack-profile: - -============= -Stack Profile -============= - -The stack profile instantiates nodes that are associated with heat stack -instances. - -Properties -~~~~~~~~~~ - -.. schemaprops:: - :package: senlin.profiles.os.heat.stack.StackProfile - -Sample -~~~~~~ - -Below is a typical spec for a stack profile: - -.. literalinclude :: /../../examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml - :language: yaml diff --git a/doc/source/user/profiles.rst b/doc/source/user/profiles.rst deleted file mode 100644 index 087bd7546..000000000 --- a/doc/source/user/profiles.rst +++ /dev/null @@ -1,426 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - -.. _ref-profiles: - -======== -Profiles -======== - -Concept -~~~~~~~ - -A :term:`Profile` is the mould used for creating a :term:`Node` to be managed -by the Senlin service. It can be treated as an instance of a -:term:`Profile Type` with a unique ID. A profile encodes the information -needed for node creation in a property named ``spec``. - -The primary job for a profile type implementation is to translate user provided -JSON data structure into information that can be consumed by a driver. A -driver will create/delete/update a physical object based on the information -provided. - - -Listing Profiles -~~~~~~~~~~~~~~~~ - -To examine the list of profile objects supported by the Senlin engine, you can -use the following command:: - - $ openstack cluster profile list - +----------+----------+--------------------+---------------------+ - | id | name | type | created_at | - +----------+----------+--------------------+---------------------+ - | 560a8f9d | myserver | os.nova.server-1.0 | 2015-05-05T13:26:00 | - | ceda64bd | mystack | os.heat.stack-1.0 | 2015-05-05T13:26:25 | - | 9b127538 | pstack | os.heat.stack-1.0 | 2015-06-25T12:59:01 | - +----------+----------+--------------------+---------------------+ - -Note that the first column in the output table is a *short ID* of a profile -object. Senlin command line use short IDs to save real estate on screen so -that more useful information can be shown on a single line. To show the *full -ID* in the list, you can add the :option:`--full-id` option to the command:: - - $ openstack cluster profile list --full-id - +-------------------+----------+--------------------+---------------------+ - | id | name | type | created_at | - +-------------------+----------+--------------------+---------------------+ - | 560a8f9d-7596-... | myserver | os.nova.server-1.0 | 2015-05-05T13:26:00 | - | ceda64bd-70b7-... | mystack | os.heat.stack-1.0 | 2015-05-05T13:26:25 | - | 9b127538-a675-... 
| pstack | os.heat.stack-1.0 | 2015-06-25T12:59:01 |
- +-------------------+----------+--------------------+---------------------+
-
-The ``id`` column above contains the full UUIDs of the profiles.
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing profiles,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used
-are profile properties and the dirs can be either ``asc`` or ``desc``. When
-omitted, Senlin sorts a given key using ``asc`` as the default direction.
-
-For example, the following command sorts the profiles using the ``name``
-property in descending order::
-
-  $ openstack cluster profile list --sort name:desc
-
-When sorting the list of profiles, you can use one of ``type``, ``name``,
-``created_at`` and ``updated_at``.
-
-
-Filtering the List
-------------------
-
-The :program:`openstack cluster profile list` command also provides options
-for filtering the profile list at the server side. The option
-:option:`--filters` can be used for this purpose. For example, the following
-command filters the profiles by the ``type`` field::
-
-  $ openstack cluster profile list --filters "type=os.heat.stack-1.0"
-  +----------+----------+--------------------+---------------------+
-  | id | name | type | created_at |
-  +----------+----------+--------------------+---------------------+
-  | ceda64bd | mystack | os.heat.stack-1.0 | 2015-05-05T13:26:25 |
-  | 9b127538 | pstack | os.heat.stack-1.0 | 2015-06-25T12:59:01 |
-  +----------+----------+--------------------+---------------------+
-
-The option :option:`--filters` accepts a list of key-value pairs separated
-by semicolon (``;``), where each key-value pair is expected to be of format
-``<key>=<value>``. The valid keys for filtering include ``name`` and
-``type``.
-
-
-Paginating the List
--------------------
-
-In case you have a huge collection of profile objects, you can limit the
-number of profiles returned from the Senlin server, using the option
-:option:`--limit`. For example::
-
-  $ openstack cluster profile list --limit 1
-  +----------+----------+--------------------+---------------------+
-  | id | name | type | created_at |
-  +----------+----------+--------------------+---------------------+
-  | 560a8f9d | myserver | os.nova.server-1.0 | 2015-05-05T13:26:00 |
-  +----------+----------+--------------------+---------------------+
-
-Yet another option you can specify is the ID of a profile object after which
-you want the list to start. In other words, you don't want to see those
-profiles whose IDs are, or come before, the one you specify. You can use the
-option :option:`--marker` for this purpose. For example::
-
-  $ openstack cluster profile list --limit 1 \
-      --marker ceda64bd-70b7-4711-9526-77d5d51241c5
-  +----------+--------+-------------------+---------------------+
-  | id | name | type | created_at |
-  +----------+--------+-------------------+---------------------+
-  | 9b127538 | pstack | os.heat.stack-1.0 | 2015-06-25T12:59:01 |
-  +----------+--------+-------------------+---------------------+
-
-
-Creating a Profile
-~~~~~~~~~~~~~~~~~~
-
-Before working with a :term:`Cluster` or a :term:`Node`, you will need a
-:term:`Profile` object created with a profile type. To create a profile, you
-will need a "spec" file in YAML format.
For example, below is a simple spec -for the ``os.heat.stack`` profile type (the source can be found in the -:file:`/examples/profiles/heat_stack/random_string/ -heat_stack_random_string.yaml` file). - -:: - - type: os.heat.stack - version: 1.0 - properties: - name: random_string_stack - template: random_string_stack.yaml - context: - region_name: RegionOne - -The ``random_string_stack.yaml`` is the name of a Heat template file to be used -for stack creation. It is given here only as an example. You can -decide which properties to use based on your requirements. - -Now you can create a profile using the following command:: - - $ cd /opt/stack/senlin/examples/profiles/heat_stack/random_string - $ openstack cluster profile create \ - --spec heat_stack_random_string.yaml \ - my_stack - +------------+-------------------------------------------------------------+ - | Field | Value | - +------------+-------------------------------------------------------------+ - | created_at | 2015-07-01T03:13:23 | - | domain_id | None | - | id | c0389712-9c1a-4c58-8ba7-caa61b34b8b0 | - | location | None | - | metadata | {} | - | name | my_stack | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | spec | +------------+--------------------------------------------+ | - | | | property | value | | - | | +------------+--------------------------------------------+ | - | | | version | 1.0 | | - | | | type | "os.heat.stack" | | - | | | properties | { | | - | | | | "files": { | | - | | | | "file:///...": "" | | - | | | | }, | | - | | | | "disable_rollback": true, | | - | | | | "template": { | | - | | | | "outputs": { | | - | | | | "result": { | | - | | | | "value": { | | - | | | | "get_attr": [ | | - | | | | "random", | | - | | | | "value" | | - | | | | ] | | - | | | | } | | - | | | | } | | - | | | | }, | | - | | | | "heat_template_version": "2014-10-16", | | - | | | | "resources": { | | - | | | | "random": { | | - | | | | "type": "OS::Heat::RandomString", | | - | | | | "properties": { | | - | | | | "length": 64 | | - | | | | } | | - | | | | } | | - | | | | }, | | - | | | | "parameters": { | | - | | | | "file": { | | - | | | | "default": { | | - | | | | "get_file": "file:///..." | | - | | | | }, | | - | | | | "type": "string" | | - | | | | } | | - | | | | } | | - | | | | }, | | - | | | | "parameters": {}, | | - | | | | "timeout": 60, | | - | | | | "environment": { | | - | | | | "resource_registry": { | | - | | | | "os.heat.server": "OS::Heat::Server" | | - | | | | } | | - | | | | }, | | - | | | | "context": { | | - | | | | "region_name": "RegionOne" | | - | | | | } | | - | | | | } | | - | | +------------+--------------------------------------------+ | - | type | os.heat.stack-1.0 | - | updated_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+-------------------------------------------------------------+ - -From the outputs, you can see that the profile is created with a new ``id`` -generated. The ``spec`` property is dumped for the purpose of verification. - -Optionally, you can attach some key-value pairs to the new profile when -creating it. 
This data is referred to as the *metadata* for the profile:: - - $ openstack cluster profile create \ - --spec heat_stack_random_string.yaml \ - --metadata "author=Tom;version=1.0" \ - my_stack - - $ openstack cluster profile create \ - --spec heat_stack_random_string.yaml \ - --metadata author=Tom --metadata version=1.0 \ - my_stack - - -Showing the Details of a Profile -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once there are profile objects in Senlin database, you can use the following -command to show the properties of a profile:: - - $ openstack cluster profile show myserver - +------------+---------------------------------------------------------+ - | Field | Value | - +------------+---------------------------------------------------------+ - | created_at | 2015-07-01T03:18:58 | - | domain_id | None | - | id | 70a36cc7-9fc7-460e-98f6-d44e3302e604 | - | location | None | - | metadata | {} | - | name | my_server | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | spec | +------------+----------------------------------------+ | - | | | property | value | | - | | +------------+----------------------------------------+ | - | | | version | 1.0 | | - | | | type | "os.nova.server" | | - | | | properties | { | | - | | | | "key_name": "oskey", | | - | | | | "flavor": 1, | | - | | | | "networks": [ | | - | | | | { | | - | | | | "network": "private" | | - | | | | } | | - | | | | ], | | - | | | | "image": "cirros-0.3.2-x86_64-uec", | | - | | | | "name": "cirros_server" | | - | | | | } | | - | | +------------+----------------------------------------+ | - | type | os.nova.server-1.0 | - | update_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+---------------------------------------------------------+ - -Note that :program:`openstack cluster` command line accepts one of the -following values when retrieving a profile object: - -- name: the name of a profile; -- ID: the UUID of a profile; -- short ID: an "abbreviated version" of the profile UUID. - -Since Senlin doesn't require a profile name to be unique, specifying profile -name for the :command:`openstack cluster profile show` command won't guarantee -that a profile object is returned. You may get a ``MultipleChoices`` exception -if more than one profile object match the name. - -As another option, when retrieving a profile (or in fact any other objects, -e.g. a cluster, a node, a policy etc.), you can specify the leading sub-string -of an UUID as the "short ID" for query. 
For example:: - - $ openstack cluster profile show 70a36cc7 - +------------+---------------------------------------------------------+ - | Field | Value | - +------------+---------------------------------------------------------+ - | created_at | 2015-07-01T03:18:58 | - | domain_id | None | - | id | 70a36cc7-9fc7-460e-98f6-d44e3302e604 | - | location | None | - | metadata | {} | - | name | my_server | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | spec | +------------+----------------------------------------+ | - | | | property | value | | - | | +------------+----------------------------------------+ | - | | | version | 1.0 | | - | | | type | "os.nova.server" | | - | | | properties | { | | - | | | | "key_name": "oskey", | | - | | | | "flavor": 1, | | - | | | | "networks": [ | | - | | | | { | | - | | | | "network": "private" | | - | | | | } | | - | | | | ], | | - | | | | "image": "cirros-0.3.2-x86_64-uec", | | - | | | | "name": "cirros_server" | | - | | | | } | | - | | +------------+----------------------------------------+ | - | type | os.nova.server-1.0 | - | update_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+---------------------------------------------------------+ - $ openstack cluster profile show 70a3 - +------------+---------------------------------------------------------+ - | Field | Value | - +------------+---------------------------------------------------------+ - | created_at | 2015-07-01T03:18:58 | - | domain_id | None | - | id | 70a36cc7-9fc7-460e-98f6-d44e3302e604 | - | location | None | - | metadata | {} | - | name | my_server | - | project_id | 333acb15a43242f4a609a27cb097a8f2 | - | spec | +------------+----------------------------------------+ | - | | | property | value | | - | | +------------+----------------------------------------+ | - | | | version | 1.0 | | - | | | type | "os.nova.server" | | - | | | properties | { | | - | | | | "key_name": "oskey", | | - | | | | "flavor": 1, | | - | | | | "networks": [ | | - | | | | { | | - | | | | "network": "private" | | - | | | | } | | - | | | | ], | | - | | | | "image": "cirros-0.3.2-x86_64-uec", | | - | | | | "name": "cirros_server" | | - | | | | } | | - | | +------------+----------------------------------------+ | - | type | os.nova.server-1.0 | - | update_at | None | - | user_id | 5e5bf8027826429c96af157f68dc9072 | - +------------+---------------------------------------------------------+ - -As with query by name, a "short ID" won't guarantee that a profile object is -returned even if it does exist. When there are more than one object matching -the short ID, you will get a ``MultipleChoices`` exception. - - -Updating a Profile -~~~~~~~~~~~~~~~~~~ - -In general, a profile object should not be updated after creation. This is a -restriction to keep cluster and node status consistent at any time. However, -considering that there are cases where a user may want to change some -properties of a profile, :program:`openstack cluster` command line does -support the :command:`profile update` sub-command. For example, the following -command changes the name of a profile to ``new_server``:: - - $ openstack cluster profile update --name new_server myserver - -The following command creates or updates the metadata associated with the given -profile:: - - $ openstack cluster profile update --metadata version=2.2 myserver - -Changing the "spec" of a profile is not allowed. The only way to make a change -is to create a new profile using the :command:`profile create` sub-command. 
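-
-If you manage profiles from Python, the same update can be sketched with
-openstacksdk. This is a hedged sketch: it assumes your openstacksdk version
-ships the clustering (Senlin) proxy, and "mycloud" is a placeholder
-clouds.yaml entry:
-
-.. code-block:: python
-
-   import openstack
-
-   conn = openstack.connect(cloud="mycloud")  # assumed clouds.yaml entry
-   profile = conn.clustering.find_profile("myserver")
-   # Only the name and metadata may change; the spec itself stays immutable.
-   conn.clustering.update_profile(profile, name="new_server",
-                                  metadata={"version": "2.2"})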
-
-
-Deleting a Profile
-~~~~~~~~~~~~~~~~~~
-
-When there are no clusters or nodes referencing a profile object, you can
-delete it from the Senlin database using the following command::
-
-  $ openstack cluster profile delete myserver
-
-Note that in this command you can use the name, the ID or the "short ID" to
-specify the profile object you want to delete. If the specified criteria
-cannot match any profiles, you will get a ``ResourceNotFound`` exception. If
-more than one profile matches the criteria, you will get a ``MultipleChoices``
-exception. For example::
-
-  $ openstack cluster profile delete my
-  ERROR(404): The profile (my) could not be found.
-  Failed to delete any of the specified profile(s).
-
-
-See Also
-~~~~~~~~
-
-The following is a list of links to documents related to profile creation
-and usage:
-
-- :doc:`Working with Profile Types `
-- :ref:`Nova Profile `
-- :ref:`Stack Profile `
-- :ref:`Docker Profile `
-- :doc:`Creating and Managing Clusters `
-- :doc:`Creating and Managing Nodes `
-- :doc:`Managing Cluster Membership `
-- :doc:`Examining Actions `
-- :doc:`Browsing Events `
diff --git a/doc/source/user/receivers.rst b/doc/source/user/receivers.rst
deleted file mode 100644
index 49e343bc3..000000000
--- a/doc/source/user/receivers.rst
+++ /dev/null
@@ -1,185 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-.. _ref-receivers:
-
-========
-Receiver
-========
-
-A :term:`Receiver` is used to prepare the Senlin engine to react to external
-alarms or events so that a specific :term:`Action` can be initiated on a
-Senlin cluster automatically. For example, when the workload on a cluster
-climbs high, a receiver can change the size of a specified cluster.
-
-
-Listing Receivers
-~~~~~~~~~~~~~~~~~
-
-The :program:`openstack cluster` command line provides a sub-command
-:command:`receiver list` that can be used to enumerate receiver objects known
-to the service. For example::
-
-  $ openstack cluster receiver list
-
-
-Sorting the List
-----------------
-
-You can specify the sorting keys and sorting direction when listing receivers,
-using the option :option:`--sort`. The :option:`--sort` option accepts a
-string of format ``key1[:dir1],key2[:dir2],key3[:dir3]``, where the keys used
-are receiver properties and the dirs can be either ``asc`` or ``desc``. When
-omitted, Senlin sorts a given key using ``asc`` as the default direction.
-
-For example, the following command sorts the receivers using the ``name``
-property in descending order::
-
-  $ openstack cluster receiver list --sort name:desc
-
-When sorting the list of receivers, you can use one of ``type``, ``name``,
-``action``, ``cluster_id``, ``created_at``.
-
-
-Paginating the List
--------------------
-
-In case you have a huge collection of receiver objects, you can limit the
-number of receivers returned from the Senlin server, using the option
-:option:`--limit`. For example::
-
-  $ openstack cluster receiver list --limit 1
-
-Yet another option you can specify is the ID of a receiver object after which
-you want the list to start. In other words, you don't want to see those
-receivers whose IDs are, or come before, the one you specify. You can use the
-option :option:`--marker` for this purpose. For example::
-
-  $ openstack cluster receiver list \
-      --limit 1 --marker 239d7212-6196-4a89-9446-44d28717d7de
-
-Combining the :option:`--marker` option and the :option:`--limit` option
-enables you to do pagination on the results returned from the server.
-
-
-Creating and Using a Receiver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Currently, Senlin supports two receiver types: "``webhook``" and "``message``".
-For the former, a permanent webhook URL is generated for users to trigger
-a specific action on a given cluster by sending an HTTP POST request. For the
-latter, a Zaqar message queue is created for users to post a message.
-Such a message is used to notify the Senlin service to initiate an action on a
-specific cluster.
-
-Webhook Receiver
-----------------
-
-When creating a webhook receiver, you are expected to use the option
-:option:`--cluster` to specify the target cluster and the option
-:option:`--action` to specify the action name. By default, the
-:program:`openstack cluster receiver create` command line creates a receiver
-of type "``webhook``". Users can also explicitly specify the receiver type
-using the option :option:`--type`, for example:
-
-.. code-block:: console
-
-   $ openstack cluster receiver create \
-     --cluster test-cluster \
-     --action CLUSTER_SCALE_OUT \
-     --type webhook \
-     test-receiver
-   +------------+-----------------------------------------------------------+
-   | Field | Value |
-   +------------+-----------------------------------------------------------+
-   | action | CLUSTER_SCALE_OUT |
-   | actor | { |
-   | | "trust_id": "2e76547947954e6ea62b61a658ffb8e5" |
-   | | } |
-   | channel | { |
-   | | "alarm_url": "http://10.20.10.17:8777/v1/webhooks/...." |
-   | | } |
-   | cluster_id | 9f1883a7-6837-4fe4-b621-6ec6ba6c3668 |
-   | created_at | 2018-02-24T09:23:48Z |
-   | domain_id | None |
-   | id | 2a5a266d-0c3a-456c-bbb7-f8b26ef3b7f3 |
-   | location | None |
-   | name | test-receiver |
-   | params | {} |
-   | project_id | bdeecc1b58004bb19302da77ac056b44 |
-   | type | webhook |
-   | updated_at | None |
-   | user_id | e1ddb7e7538845968789fd3a863de928 |
-   +------------+-----------------------------------------------------------+
-
-The Senlin service will return the receiver information with its channel
-ready to receive HTTP POST requests. For a webhook receiver, this means you
-can check the "``alarm_url``" field of the "``channel``" property. You can
-use this URL to trigger the action you specified.
-
-The following command triggers the receiver by sending a ``POST`` request to
-the URL obtained from its ``channel`` property, for example:
-
-.. code-block:: console
-
-   $ curl -X POST 
-
-
-Message Receiver
-----------------
-
-A message receiver is different from a webhook receiver in that it can trigger
-different actions on different clusters. Therefore, the options
-:option:`--cluster` and :option:`--action` can be omitted when creating a
-message receiver. Senlin will check whether the incoming message contains
-such properties.
-
-You will need to specify the receiver type "``message``" using the option
-:option:`--type` when creating a message receiver, for example:
-
-.. code-block:: console
-
-   $ openstack cluster receiver create \
-     --type message \
-     test-receiver
-
-The Senlin service will return the receiver information with its channel
-ready to receive messages. For a message receiver, this means you can check
-the "``queue_name``" field of the "``channel``" property.
-
-Once a message receiver is created, you (or some software) can send messages
-with the following format to the named Zaqar queue to ask the Senlin service
-to initiate the corresponding action:
-
-.. code-block:: json
-
-   {
-     "messages": [
-       {
-         "ttl": 300,
-         "body": {
-           "cluster": "test-cluster",
-           "action": "CLUSTER_SCALE_OUT",
-           "params": {"count": 2}
-         }
-       }
-     ]
-   }
-
-More examples of sending messages to a Zaqar queue can be found here:
-
-https://opendev.org/openstack/python-zaqarclient/src/branch/master/examples
-
-.. note::
-
-   Users are permitted to trigger multiple actions at the same time by sending
-   more than one message to a Zaqar queue in the same request. In that case,
-   the order of actions generated depends on how Zaqar sorts those messages.
diff --git a/doc/specs/README.rst b/doc/specs/README.rst
deleted file mode 100644
index de254589b..000000000
--- a/doc/specs/README.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-INTRO
-=====
-
-This directory holds the proposals of non-trivial changes to senlin. We host
-them here to avoid the potential headaches of managing yet another project,
-say `senlin-specs`. When the need arises for a dedicated project for
-proposals, we can create such a project and migrate the contents over.
-
-
-DIRECTORY LAYOUT
-================
-
-Proposals will be put into this directory during review. After being
-reviewed, they will be migrated into the `rejected` subdirectory or the
-`approved` subdirectory respectively.
-
-
-rejected
---------
-
-A subdirectory for proposals that were rejected.
-
-
-approved
---------
-
-A subdirectory for proposals that were approved.
diff --git a/doc/specs/approved/README.rst b/doc/specs/approved/README.rst
deleted file mode 100644
index 6d0df9dbd..000000000
--- a/doc/specs/approved/README.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-This directory holds the feature proposals that have been approved. Once the
-features have landed, the contents should be migrated into a design document
-instead of being kept here.
diff --git a/doc/specs/approved/container-cluster.rst b/doc/specs/approved/container-cluster.rst
deleted file mode 100644
index d787ca390..000000000
--- a/doc/specs/approved/container-cluster.rst
+++ /dev/null
@@ -1,176 +0,0 @@
-..
-  Licensed under the Apache License, Version 2.0 (the "License"); you may
-  not use this file except in compliance with the License. You may obtain
-  a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-  License for the specific language governing permissions and limitations
-  under the License.
-
-=================
-Container Cluster
-=================
-
-The mission of the Senlin project is to provide a generic clustering service
-for an OpenStack cloud. Currently Senlin provides clustering services for
-Nova instances and Heat stacks, so it is natural to consider clusters of
-containers.
-
-Problem Description
-===================
-
-As for container services, Magnum is a project which provides an API for
-users to deploy container orchestration engines such as Docker Swarm,
-Kubernetes and Apache Mesos. By using these engines users can build and
-manage their own container clouds. However, the container clouds created
-with these tools are not managed by Magnum after they are created. That
-means those containers are not OpenStack-managed resources, so other
-projects which want to use container resources can't invoke Magnum to
-acquire them. Furthermore, the dependency on those engines will cause
-version management problems and makes it difficult to test the container
-engines, because they are not implemented in Python. Cloud operators who
-want to use OpenStack to manage containers may prefer OpenStack's own
-container service over learning how to use Docker Swarm etc.
-
-Use Cases
-=========
-
-Users who want to use container services may want to use a container cluster
-instead of a single container. In an OpenStack cloud, a user may want to
-deploy container clusters on baremetal machines, or on all or some specific
-virtual machines in the cloud. Such a container cluster is desired to be
-scalable, highly available, multi-tenant aware and secure, and it should be
-easily controlled by invoking standard OpenStack REST APIs.
-
-Proposed Changes
-================
-
-1. Docker library
-   Senlin would like to support Docker type container resources. As Docker
-   provides an API to developers, it is very easy to create/delete a
-   container resource by invoking the Docker API directly.
-   A Docker driver will be added for container management.
-2. Container Profile
-   It is necessary to add a new type of profile for containers to start
-   with. The container profile will contain the required properties like
-   network, volume etc. needed to create a container.
-3. Scheduling
-   To decide in which virtual/baremetal machines containers are to be
-   started, a scheduler is needed. There are some existing container
-   schedulers like Docker Swarm which are widely used in production, but
-   considering Senlin's features, it is reasonable to invent a scheduler
-   which can support container auto-scaling better, for example, by starting
-   containers preferentially on specified nodes whose CPU utilization is
-   lower than a certain value.
-   That would be an intelligent but complicated solution for container
-   scheduling. To meet the limited needs, the Senlin placement policy can
-   work as a scheduler in place of a complicated scheduler implementation.
-   For the simplest case, add 'host_node' and 'host_cluster' properties to
-   the container profile, which can be used to determine the placement of
-   containers. Since Senlin supports scaling, some rules should be obeyed
-   to coordinate the usage of host_node and host_cluster:
-
-   * Only container type profiles can contain the 'host_node' and
-     'host_cluster' properties.
-   * A container type profile must contain both the 'host_node' and
-     'host_cluster' properties, but either one (not both) of them can be
-     None.
-   * Host_node must belong to host_cluster.
-   * If host_node is None and host_cluster is not None, the container will
-     be started on a randomly chosen node of the cluster. (This may be
-     changed in the future, to prioritize nodes with low CPU and memory
-     usage.)
-4. Network
-   To allocate an IP address to every container, a network for containers is
-   desired before creating a container.
-   Kuryr brings container networking to Neutron, which can make container
-   networking management similar to that of Nova servers. Senlin will
-   introduce Kuryr for container networking management.
-5. Storage
-   For the virtual machines in which containers will be started, it is
-   necessary to attach a volume in advance. The containers started in the
-   virtual machines will share the volume. Currently Flocker and Rexray are
-   the options.
-6. Policies
-   The policies for the container service are different from those for
-   virtual machines. For example, in the placement policy the specified
-   nodes, availability zones or regions should be provided.
-7. Test
-   Add test cases for the container service on both the client and server
-   sides.
-
-Alternatives
-------------
-
-Any other ideas of managing containers with Senlin.
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-Not clear.
-
-Other end user impact
----------------------
-
-Users can use Senlin commands to create/update/delete a container cluster.
-Managing containers will become much easier.
-
-Performance Impact
-------------------
-
-None
-
-Developer Impact
-----------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-xuhaiwei
-anyone interested
-
-Work Items
-----------
-
-Depends on the design plan
-
-Dependencies
-============
-
-Depends on Docker.
-
-Testing
-=======
-
-Undecided
-
-Documentation Impact
-====================
-
-Documentation about container clusters will be added.
-
-References
-==========
-
-None
-
-History
-=======
-
-Approved: Newton
-Implemented: Newton
diff --git a/doc/specs/approved/generic-event.rst b/doc/specs/approved/generic-event.rst
deleted file mode 100644
index 87f8b64f2..000000000
--- a/doc/specs/approved/generic-event.rst
+++ /dev/null
@@ -1,256 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-====================================
-Generic Event Interface and Backends
-====================================
-
-The URL of the launchpad blueprint:
-
-https://blueprints.launchpad.net/senlin/+spec/generic-event
-
-Currently Senlin has a DB backend to log events that might be of interest to
-users/operators. However, we will also need to send out event notifications
-for integration with 3rd party software/services. Users/operators may want to
-dump the events into a file or a time series database for processing.
-
-The blueprint proposes a generic interface for dumping events/notifications.
-Such an interface can be implemented by different backends as event plugins.
-
-Problem description
-===================
-
-While the Senlin engine is operating clusters or nodes, interacting with other
-services or enforcing policies, there are many cases where the operations
-(and the results) should be dumped.
-
-Currently, Senlin only has a builtin event table in the database. It is
-accumulating very fast, it is not flexible, and its content is not versioned.
-
-To integrate with other services, Senlin will need to generate and send
-notifications when certain events happen. More complex (pre-)processing can
-be offloaded to a service dedicated to this task (e.g. Panko from
-Ceilometer), but the basic notifications should always come from the engine.
-(Note that we treat "notifications" as a special form of events, i.e. they
-are "events on the wire", events sent to a message queue for other
-services/software to consume.)
-
-As Senlin evolves, changes are inevitable regarding the content of the
-payload of such events and/or notifications. To best protect users'
-investment in downstream event processing, we will need to be very explicit
-about the content and format of each and every event/notification.
-
-The format of events/notifications should be well documented so that users or
-developers of downstream software don't need to dig into Senlin's source code
-to find out the exact format of each event type or notification type. This
-should remain true even when the event/notification format evolves over time.
-
-There is no one-size-fits-all solution that meets all requirements from the
-use cases enumerated in the "Use Cases" subsection below. The event generation
-has to be an open framework, with a generic interface, that allows for
-diversified backend implementations, aka drivers.
-
-Events or notifications are inherently of different criticality or severity.
-Users should be able to filter the events by their severity easily. Similarly,
-events or notifications are generated from different types of modules, e.g.
-``engine``, ``profile``, ``policy``; we may want to enable an operator to
-specify the sources of events to include or exclude. Note that source-based
-filtering is not a high priority requirement as we see it today.
-
-Use Cases
----------
-
-The dumping of events could serve several use cases:
-
-- Problem diagnosis: Although there are cases where users can check the logs
-  from the engine (supposing we are already dumping rich information), it is
-  unlikely that everyone is granted access to the raw log files. Event logs
-  are a replacement for raw log files.
-- Integration with Other Software: When building a solution by integrating
-  Senlin with other software/services, the said service may need Senlin to
-  emit events of interest so that some operations can be adjusted
-  dynamically.
-- Auditing: In the case where there are auditing requirements regarding
-  user behavior analysis or resource usage tracking, a history of user
-  operations would be very helpful in conducting this kind of analysis.
-
-Proposed change
-===============
-
-Add an interface definition for event logging and notification. This will be
-a unified interface for all backends. The interface is a generalization of
-the existing event dumping support.
-
-Make the existing event module (which dumps events into DB tables) a
-plugin that implements the logging interface.
-
-Model all events dumped today as versioned objects. Different event types will
-use different objects. This will be done by preserving the existing DB schema
-of the ``event`` table if possible. And, more importantly, the event
-abstraction should match the expectations of the notification interface from
-the ``oslo.messaging`` package. We will learn from the versioned notification
-design of Nova, but we are going one step further.
-
-Add filters for event/notification generation, regarding the severity and the
-source. Expose these filters as configurable options in ``senlin.conf``.
-These filters (among others) may deserve a new section, but we will decide
-when we get there.
-
-Add stevedore plugin loading support to logging, with "``database``" and
-"``messaging``" set as defaults. We may add a ``json file`` backend
-for demonstration purposes, but that is optional. The backend of event
-logging (and notification) will be exposed as a multi-string configuration
-option in ``senlin.conf``.
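-
-To make the proposed interface concrete, below is a purely illustrative
-sketch; the class and method names are invented for this example and do not
-prescribe the eventual implementation:
-
-.. code-block:: python
-
-   import abc
-
-   class EventBackend(abc.ABC):
-       """Unified interface every event/notification backend implements."""
-
-       @abc.abstractmethod
-       def dump(self, level, action, **kwargs):
-           """Record or emit one event derived from the given action."""
-
-   class DatabaseBackend(EventBackend):
-       """Would keep today's behavior: write a row into the event table."""
-       def dump(self, level, action, **kwargs):
-           pass  # INSERT into the existing ``event`` DB table
-
-   class MessagingBackend(EventBackend):
-       """Would emit a versioned notification via oslo.messaging."""
-       def dump(self, level, action, **kwargs):
-           pass  # e.g. a notifier call on the configured transport
-
-Backends like these would then be registered as stevedore plugins and
-selected through the multi-string option mentioned above.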
-
-Following the "api-ref" scheme for API documentation, we will document the
-formats of all events/notifications in REST files.
-
-Alternatives
-------------
-
-Keep the event generation and notification separate. This seems to duplicate
-a lot of logic. At a source location where you want to fire an event, and
-also a log, and also a notification, you may have to make three calls.
-
-Data model impact
------------------
-
-We will strive to keep the existing DB schema (especially the ``event`` table
-format) unless we have a good reason to add columns.
-
-REST API impact
----------------
-
-There is no change to the REST API planned.
-
-Security impact
----------------
-
-One thing we are not so sure about is where to draw the line between "proper"
-and "excessive" dumping of events. We will need some profiling when trading
-things off.
-
-Both events and notifications will leverage the multi-tenancy support (i.e.
-``project`` will be included in the payload), so tenant isolation won't be a
-problem.
-
-Notifications impact
---------------------
-
-Well... this spec is about constructing the infrastructure for notification,
-in addition to events and logs.
-
-Other end user impact
----------------------
-
-Users will be able to see notifications from Senlin in the message queue.
-Users will get detailed documentation about the event/notification formats.
-
-No change to python-senlinclient will be involved.
-
-There could be changes to senlin-dashboard if we change the response from the
-``event-list`` or ``event-show`` API, but that is not expected.
-
-Performance Impact
-------------------
-
-* An overloaded message queue may lead to slower responses from
-  senlin-engine? Not quite sure.
-
-* An overloaded DBMS may slow down the senlin-engine.
-
-* A high frequency of event generation, based on common sense, will impact
-  the service performance.
-
-Other deployer impact
----------------------
-
-There is no new dependency on other packages planned.
-
-There will be several new config options added. We will make them as generic
-as possible because the infrastructure proposed is a generic one. We will
-include database and messaging as the default backends, which should work in
-most real deployments.
-
-The changes to the configuration file will be documented in release notes.
-
-Developer impact
-----------------
-
-There will be some reference documents on event/notification format design
-for developers of downstream software/services.
-
-There will be some developer documents for adding new logging backends.
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Primary assignee:
-  Qiming
-
-Other contributors:
-  Anyone who wishes to adventure ...
-
-Work Items
-----------
-
-Currently identified work items:
-
-- Abstract class (interface) for logging;
-- Rebase the event dumping module onto this interface;
-- Versioned objects for existing events;
-- Driver for dumping events (thus becoming notifications) to the message
-  queue;
-- Dynamic loading of both backends (database and messaging);
-- Configuration options for backend selection and customization;
-- Documentation of event formats;
-- User documentation for events (improvement);
-- Developer documentation for new logging backends;
-
-Dependencies
-============
-
-No dependency on other specs/bps/projects.
-
-Need to watch changes in ``oslo.messaging`` and ``oslo.versionedobjects`` to
-tune the implementation.
-
-Testing
-=======
-
-Only unit tests are planned.
-
-There is no plan yet for API tests, functional tests, stress tests or
-integration tests.
-
-Documentation Impact
-====================
-
-New documentation:
-
-- Documentation of event formats;
-- User documentation for events (improvement);
-- Developer documentation for new logging backends;
-- Release notes
-
-References
-==========
-
-N/A
-
-History
-=======
-
-.. list-table:: Revisions
-   :header-rows: 1
-
-   * - Release Name
-     - Description
-   * - Ocata
-     - Introduced
diff --git a/doc/specs/cluster-fast-scaling.rst b/doc/specs/cluster-fast-scaling.rst
deleted file mode 100644
index ca35812a7..000000000
--- a/doc/specs/cluster-fast-scaling.rst
+++ /dev/null
@@ -1,159 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-====================
-Cluster Fast Scaling
-====================
-
-The URL of launchpad blueprint:
-
-https://blueprints.launchpad.net/senlin/+spec/add-attribute-fast-scaling-to-cluster
-
-The major function of Senlin is managing clusters, changing the capacity of
-a cluster using scale-out and scale-in operations. Generally a single scaling
-operation will take tens of seconds, or even a few minutes in extreme cases.
-That is a long time for an actual production environment, so we need to
-improve Senlin for fast scaling.
-
-Rather than improving hardware performance or optimizing code, a better way
-is to create some standby nodes when creating a new cluster. When a cluster
-needs to change its capacity immediately, or to replace nodes in 'error'
-state with 'active' nodes, nodes can be added to the cluster from the standby
-pool, or error nodes can be removed from the cluster and replaced with active
-standby nodes.
-
-To make cluster scaling fast, this spec proposes to extend Senlin to create
-standby nodes and improve the scaling operations.
-
-
-Problem description
-===================
-
-Before actually scaling a cluster, Senlin needs to do many things; the
-slowest step is creating or deleting a node.
-
-Use Cases
----------
-
-If Senlin supports fast scaling, the following cases will be possible:
-
-- Change the capacity of a cluster immediately, without waiting for nodes to
-  be created or deleted.
-
-- Replace the error nodes in a cluster immediately, improving the high
-  availability of the cluster.
-
-- Improve the situation where a cluster is scaled many times within a short
-  period.
-
-Proposed change
-===============
-
-1. Add a new attribute 'fast_scaling' to the cluster metadata. With the
-   attribute set, Senlin will create standby nodes when creating a new
-   cluster. The number of standby nodes can be specified, but the sum of the
-   standby nodes and the nodes in the cluster should be less than the max
-   size of the cluster. (A sketch of what this might look like follows the
-   alternatives note below.)
-
-2. Revise the cluster create and cluster delete operations to support the
-   new attribute, and delete the standby nodes when deleting a cluster.
-
-3. Revise the scale-out and scale-in operations so that, with the new
-   attribute set, nodes are first added to the cluster from the standby
-   nodes, or removed from the cluster to the standby nodes.
-
-4. Revise the health policy to check the state of standby nodes and support
-   replacing error nodes with active nodes from the standby nodes.
-
-5. Revise the deletion policy to delete nodes, or move them to the standby
-   nodes, when performing a deletion operation.
-
-Alternatives
-------------
-
-Any other ideas for fast scaling a cluster.
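-
-As a purely hypothetical illustration of item 1 above, a user might request
-standby nodes through cluster metadata like this (the metadata keys come
-from this proposal and are illustrative only; "mycloud" is a placeholder
-clouds.yaml entry, and openstacksdk's clustering support is assumed):
-
-.. code-block:: python
-
-   import openstack
-
-   conn = openstack.connect(cloud="mycloud")
-   cluster = conn.clustering.create_cluster(
-       name="web-cluster",
-       profile_id="my_profile",
-       desired_capacity=2,
-       max_size=5,
-       # Proposed attributes; the sum of standby and cluster nodes
-       # stays below max_size, per the rule above.
-       metadata={"fast_scaling": True, "standby_nodes": 2},
-   )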
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-The standby nodes will claim some resources. We should keep the number of
-standby nodes within a reasonable range.
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-chohoor(Hongbin Li)
-
-Work Items
-----------
-
-Depends on the design plan.
-
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Need unit tests.
-
-
-Documentation Impact
-====================
-
-Documentation about the API and operations should be updated.
-
-
-References
-==========
-
-None
-
-
-History
-=======
-
-None
diff --git a/doc/specs/fail-fast-on-locked_resource.rst b/doc/specs/fail-fast-on-locked_resource.rst
deleted file mode 100644
index 7d50762b5..000000000
--- a/doc/specs/fail-fast-on-locked_resource.rst
+++ /dev/null
@@ -1,257 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-=============================
-Fail fast on locked resources
-=============================
-
-
-When an operation on a locked resource (e.g. cluster or node) is requested,
-Senlin creates a corresponding action and calls on the engine dispatcher to
-asynchronously process it. If the targeted resource is locked by another
-operation, the action will fail to process it and the engine will ask the
-dispatcher to retry the action up to three times. If the resource is still
-locked after three retries, the action is considered failed. The user making
-the operation request will not know that an action has failed until the
-retries have been exhausted and it queries the action state from Senlin.
-
-This spec proposes to check the lock status of the targeted resource and fail
-immediately if it is locked during the synchronous API call by the user. The
-failed action is not automatically retried. Instead it is up to the user to
-retry the API call as desired.
-
-
-Problem description
-===================
-
-The current implementation where failed actions are automatically retried can
-lead to starvation situations when a large number of actions on the same
-target cluster or node are requested. E.g. if a user requests 100 scale-in
-operations on a cluster, the Senlin engine will take a long time to process
-the retries and will not be able to respond to other commands in the
-meantime.
-
-Another problem with the current implementation is encountered when health
-checks are running against a cluster and the user is simultaneously performing
-operations on it. When the health check thread determines that a node is
-unhealthy (1), the user could request a cluster scale-out (2) before the health
-check thread had a chance to call node recovery (4). In that case the first node
-recovery will fail because the cluster is already locked and the node recovery
-action will be retried in the background. However after the scale-out
-completes and the next iteration of the health check runs, it might still see
-the node as unhealthy and request another node recovery. In that case the node
-will be unnecessarily recovered twice.
- -:: - - +---------------+ +---------------+ +-------+ - | HealthManager | | SenlinEngine | | User | - +---------------+ +---------------+ +-------+ - | -----------------\ | | - |-| Health check | | | - | | thread starts. | | | - | |----------------| | | - | | | - | (1) Is Node healthy? No. | | - |------------------------- | | - | | | | - |<------------------------ | | - | | | - | | (2) Scale Out Cluster. | - | |<---------------------------| - | | | - | | (3) Lock cluster. | - | |------------------ | - | | | | - | |<----------------- | - | | | - | (4) Recover node. | | - |-------------------------------------------------->| | - | | | - | (5) Recover node action created. | | - |<--------------------------------------------------| | - | | | - | | (6) Cluster is locked. | - | | Retry node recover. | - | |----------------------- | - | | | | - | |<---------------------- | - | | | - | (7) Get node recover action status. | | - |-------------------------------------------------->| | - | | | - | (8) Node recover action status is failed. | | - |<--------------------------------------------------| | - | ---------------\ | | - |-| Health check | | | - | | thread ends. | | | - | |--------------| | | - | | | - -Finally, there are other operations that can lead to locked clusters that are -never released as indicated in this bug: -https://bugs.launchpad.net/senlin/+bug/1725883 - -Use Cases ---------- - -As a user, I want to know right away if an operation on a cluster or node fails -because the cluster or node is locked by another operation. By being able to -receive immediate feedback when an operation fails due to a locked resource, the -Senlin engine will adhere to the fail-fast software design principle [1] and -thereby reducing the software complexity and potential bugs due to -locked resources. - -Proposed change -=============== - - -1. **All actions** - - Before an action is created, check if the targeted cluster or node is - already locked in the cluster_lock or node_lock tables. - - * If the target cluster or node is locked, throw a ResourceIsLocked - exception. - * If the action table already has an active action operating on the - target cluster or node, throw a ActionConflict exception. An action - is defined as active if its status is one of the following: - READY, WAITING, RUNNING OR WAITING_LIFECYCLE_COMPLETION. - * If the target cluster or node is not locked, proceed to create the - action. - -2. **ResourceIsLocked** - - New exception type that corresponds to a 409 HTTP error code. - -3. **ActionConflict** - - New exception type that corresponds to a 409 HTTP error code. - - -Alternatives ------------- - -None - - -Data model impact ------------------ - -None - -REST API impact ---------------- - -* Alls Action (changed in **bold**) - - :: - - POST /v1/clusters/{cluster_id}/actions - - - - Normal HTTP response code(s): - - =============== =========================================================== - Code Reason - =============== =========================================================== - 202 - Accepted Request was accepted for processing, but the processing has - not been completed. A 'location' header is included in the - response which contains a link to check the progress of the - request. 
-    =============== ===========================================================
-
-  - Expected error HTTP response code(s):
-
-    ========================== ===============================================
-    Code                       Reason
-    ========================== ===============================================
-    400 - Bad Request          Some content in the request was invalid.
-    401 - Unauthorized         User must authenticate before making a request.
-    403 - Forbidden            Policy does not allow current user to do this
-                               operation.
-    404 - Not Found            The requested resource could not be found.
-    **409 - Conflict**         **The requested resource is locked by**
-                               **another action**
-    503 - Service Unavailable  Service unavailable. This is mostly
-                               caused by service configuration errors which
-                               prevent the service from starting up
-                               successfully.
-    ========================== ===============================================
-
-
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None
-
-Other end user impact
----------------------
-
-The python-senlinclient requires modification to return the 409 HTTP error
-code to the user.
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-None
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-dtruong@blizzard.com
-
-Work Items
-----------
-
-None
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Unit tests and tempest tests are needed for the new action request behavior
-when a resource is locked.
-
-Documentation Impact
-====================
-
-The End User Guide needs to be updated to describe the new behavior of action
-requests when a target resource is locked. It should also describe that the
-user can retry an action if they receive a 409 HTTP error code.
-
-References
-==========
-
-[1] https://www.martinfowler.com/ieeeSoftware/failFast.pdf
-
-
-History
-=======
-
-None
diff --git a/doc/specs/lifecycle-hook.rst b/doc/specs/lifecycle-hook.rst
deleted file mode 100644
index a0ffdca65..000000000
--- a/doc/specs/lifecycle-hook.rst
+++ /dev/null
@@ -1,390 +0,0 @@
-..
- This work is licensed under a Creative Commons Attribution 3.0 Unported
- License.
-
- http://creativecommons.org/licenses/by/3.0/legalcode
-
-=======================================
-Add lifecycle hooks for scale in action
-=======================================
-
-
-The AWS autoscaling service provides a 'lifecycle hook' feature that Senlin
-currently lacks. Lifecycle hooks during scaling operations allow the user or
-an application to perform custom setup or clean-up of instances.
-
-This spec proposes to add lifecycle hook specific properties to the deletion
-policy applied during node removal operations (i.e. scale-in, cluster-resize,
-cluster-node-del and node-delete actions). The lifecycle hook properties
-specify a timeout and a Zaqar queue as the notification target. If the node
-removal operation detects that a deletion policy with lifecycle hook
-properties is attached, it will send a lifecycle hook message to the
-notification target for each node identified for deletion. The lifecycle hook
-message contains the node ID of the instance to be deleted and a lifecycle
-action token. In addition, the node removal operation will defer the actual
-deletion of those nodes until the timeout in the deletion policy has been
-reached.
-
-This spec also adds a new 'complete lifecycle' API endpoint. 
When this API
-endpoint is called with the lifecycle action token from the lifecycle hook
-message, Senlin immediately deletes the node that was identified by the
-node removal operation for deletion. Calling the 'complete lifecycle' API
-endpoint also cancels the deferred node deletion initiated by the node removal
-operation.
-
-Problem description
-===================
-
-When performing a scale-in operation with Senlin, an instance might require
-custom cleanup. A lifecycle hook sends a notification that lets the receiving
-application perform those custom clean-up steps on an instance before the node
-is deleted.
-
-After the clean-up has finished, the application can simply wait for the
-lifecycle hook timeout to expire, which automatically triggers the deletion
-of the nodes. Alternatively, the application can send a 'complete lifecycle'
-message to Senlin to proceed with the node deletion without waiting for the
-lifecycle hook timeout to expire.
-
-Use Cases
----------
-
-The typical use case occurs when a node must move its in-progress workload off
-to another node before it can be terminated. During auto scale-in events, an
-application must receive a Zaqar message to start those custom cleanups on
-the termination-pending nodes. If the application does not complete the
-lifecycle by a specified timeout, Senlin automatically deletes the node. If
-the application finishes the cleanup before the specified timeout expires,
-the application notifies Senlin to complete the lifecycle for a specified
-node. This triggers the immediate deletion of the node.
-
-Proposed change
-===============
-
-1. **Deletion policy**
-
-   New lifecycle hook specific properties:
-
-   * timeout
-   * target type
-   * target name
-
-2. **New action status**
-
-   WAITING_LIFECYCLE_COMPLETION
-
-3. **Scale-in, cluster-resize, cluster-node-del, node-delete actions**
-
-   If a deletion policy with lifecycle hook properties is attached, the above
-   actions differ from the current implementation as follows:
-
-   * For each node identified to be deleted:
-
-     * A DEL_NODE action is created with its status set to
-       WAITING_LIFECYCLE_COMPLETION.
-     * Send a message to the target name from the deletion policy.
-       The message contains:
-
-       * lifecycle_action_token: same as the DEL_NODE action ID
-       * node_id
-
-   * Create dependencies between the DEL_NODE actions and the original action.
-
-   * Wait for the dependent actions to complete or for the lifecycle timeout
-     specified in the deletion policy to expire.
-
-     * If the lifecycle timeout is reached:
-
-       * For each DEL_NODE action:
-
-         * If the DEL_NODE action status is WAITING_LIFECYCLE_COMPLETION,
-           then change the action status to READY.
-
-       * Call dispatcher.start_action.
-
-4. **'Complete lifecycle' API endpoint**
-
-   The new API endpoint to signal completion of the lifecycle. It expects
-   lifecycle_action_token as a parameter.
-
-   * Use lifecycle_action_token to load the DEL_NODE action.
-   * If the DEL_NODE action status is WAITING_LIFECYCLE_COMPLETION, then
-     change the action state to READY and call dispatcher.start_action.
-
-Alternatives
-------------
-
-Alternatively, attach a deletion policy with a grace period. The grace
-period allows an application to perform clean-up of instances. However,
-Senlin must implement event notifications in the form of an HTTP sink or a
-Zaqar queue so that the third party application knows which nodes are
-selected for deletion.
-
-This solution lacks the 'complete lifecycle' action allowing an application to
-request the node deletion before the timeout expires. 
This is undesirable
-because the scale-in action locks the cluster while it is sleeping for the
-grace period value. This will not work if the application finishes the
-clean-up of the instances before the grace period expires and it wants to
-perform another cluster action such as scale-out.
-
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-* Complete Lifecycle Action
-
-  ::
-
-   POST /v1/clusters/{cluster_id}/actions
-
-  Complete the lifecycle action and trigger the deletion of nodes.
-
-  - Normal HTTP response code(s):
-
-    =============== ===========================================================
-    Code            Reason
-    =============== ===========================================================
-    202 - Accepted  Request was accepted for processing, but the processing has
-                    not been completed. A 'location' header is included in the
-                    response which contains a link to check the progress of the
-                    request.
-    =============== ===========================================================
-
-  - Expected error HTTP response code(s):
-
-    ========================== ===============================================
-    Code                       Reason
-    ========================== ===============================================
-    400 - Bad Request          Some content in the request was invalid.
-    401 - Unauthorized         User must authenticate before making a request.
-    403 - Forbidden            Policy does not allow current user to do this
-                               operation.
-    404 - Not Found            The requested resource could not be found.
-    503 - Service Unavailable  Service unavailable. This is mostly
-                               caused by service configuration errors which
-                               prevent the service from starting up
-                               successfully.
-    ========================== ===============================================
-
-  - Request Parameters:
-
-    ================================= ======= ======= =======================
-    Name                              In      Type    Description
-    ================================= ======= ======= =======================
-    OpenStack-API-Version (Optional)  header  string  API microversion
-                                                      request.
-                                                      Takes the form of
-                                                      OpenStack-API-Version:
-                                                      clustering 1.0, where
-                                                      1.0 is the requested
-                                                      API version.
-    cluster_id                        path    string  The name, UUID or
-                                                      short-UUID of a cluster
-                                                      object.
-    action                            body    object  A structured definition
-                                                      of an action to be
-                                                      executed. The object is
-                                                      usually expressed as::
-
-                                                        <action_name>: {
-                                                          <param_1>: <value_1>,
-                                                          <param_2>: <value_2>,
-                                                          ...
-                                                        }
-
-                                                      The <action_name>
-                                                      indicates the requested
-                                                      action while the
-                                                      <param> keys provide
-                                                      the associated
-                                                      parameters to the
-                                                      action. Each
-                                                      individual action
-                                                      has its own set of
-                                                      parameters.
-
-                                                      The action_name in the
-                                                      request body has to be
-                                                      complete_lifecycle.
-    lifecycle_action_token            body    UUID    The UUID of the
-                                                      lifecycle action to be
-                                                      completed.
-    ================================= ======= ======= =======================
-
-  - Request example::
-
-      {
-          "complete_lifecycle": {
-              "lifecycle_action_token": "ffbb9175-d510-4bc1-b676-c6aba2a4ca81"
-          }
-      }
-
-  - Response parameters:
-
-    ================================= ======= ======= =======================
-    Name                              In      Type    Description
-    ================================= ======= ======= =======================
-    X-OpenStack-Request-ID (Optional) header  string  A unique ID for
-                                                      tracking the service
-                                                      request. 
The request
-                                                      ID associated with
-                                                      the request by default
-                                                      appears in the service
-                                                      logs.
-    Location                          header  string  For asynchronous object
-                                                      operations, the
-                                                      location header
-                                                      contains a string
-                                                      that can be interpreted
-                                                      as a relative URI
-                                                      from where users can
-                                                      track the progress
-                                                      of the action triggered.
-    action                            body    string  A string
-                                                      representation of
-                                                      the action for
-                                                      execution.
-    ================================= ======= ======= =======================
-
-* Deletion Policy
-
-  Additional properties specific to the lifecycle hook are added to the
-  Deletion policy. The existing properties from senlin.policy.deletion-1.0
-  are carried over into senlin.policy.deletion-1.1 and are not listed below.
-
-  ::
-
-    name: senlin.policy.deletion-1.1
-    schema:
-      hooks:
-        description: Lifecycle hook properties
-        required: false
-        type: Map
-        updatable: false
-        schema:
-          type:
-            constraints:
-            - constraint:
-              - zaqar
-              - webhook
-              type: AllowedValues
-            default: zaqar
-            description: The type of lifecycle hook
-            required: false
-            type: String
-            updatable: false
-          params:
-            description: Specific parameters for the hook type
-            required: false
-            type: Map
-            updatable: false
-            schema:
-              queue:
-                description: Zaqar queue to receive lifecycle hook message
-                required: false
-                type: String
-                updatable: false
-              url:
-                description: URL sink to which to send lifecycle hook message
-                required: false
-                type: String
-                updatable: false
-          timeout:
-            description: Number of seconds before actual deletion happens
-            required: false
-            type: Integer
-            updatable: false
-
-
-* Lifecycle Hook Message
-
-  The lifecycle hook message is sent to the Zaqar queue when a scale_in
-  request is received and the cluster has a deletion policy with lifecycle
-  hook properties attached. It includes:
-
-  ========================== ======= ==========================================
-  Name                       Type    Description
-  ========================== ======= ==========================================
-  lifecycle_action_token     UUID    The action ID of the 'complete lifecycle'
-                                     action.
-  node_id                    UUID    The cluster node ID to be terminated.
-  lifecycle_transition_type  string  The type of lifecycle transition.
-  ========================== ======= ==========================================
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-A new notification is sent to a specified Zaqar queue.
-
-Other end user impact
----------------------
-
-The python-senlinclient requires modification to allow the user to perform
-the 'complete lifecycle' action.
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-None
-
-Developer impact
-----------------
-
-The openstacksdk requires modification to add the new 'complete
-lifecycle' API endpoint.
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-dtruong@blizzard.com
-
-Work Items
-----------
-
-None
-
-Dependencies
-============
-
-None
-
-
-Testing
-=======
-
-Tempest tests for the new API endpoint and policy will be added.
-
-Documentation Impact
-====================
-
-The End User Guide needs to be updated for the new API endpoint, the deletion
-policy changes and the behavior changes to the scale-in, cluster-resize,
-cluster-node-del and node-delete actions.
-
-References
-==========
-
-None
-
-
-History
-=======
-
-None
diff --git a/doc/specs/multiple-detection-modes.rst b/doc/specs/multiple-detection-modes.rst
deleted file mode 100644
index 99a11ca33..000000000
--- a/doc/specs/multiple-detection-modes.rst
+++ /dev/null
@@ -1,317 +0,0 @@
-..
- This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================= -Multiple polling detection modes in Health Policy -================================================= - -The health policy allows a user specify a detection mode to use for checking -node health. In the current implementation only one of the following detection -modes is allowed: - -* NODE_STATUS_POLLING -* NODE_STATUS_POLL_URL -* LIFECYCLE_EVENTS - -This spec proposes to let the user specify multiple polling detection modes in -the same health policy. E.g. the user can specify both NODE_STATUS_POLLING and -NODE_STATUS_POLL_URL detection modes in the same health policy. - - -Problem description -=================== - -The current implementation only allows a health policy to specify a single -detection mode to use for verifying the node health. However, there are -situations in which the user would want to have two detection modes checked and -only rebuild a node if both modes failed. Using multiple detection modes has the -benefit of fault tolerant health checks where one detection mode takes over in -case the other detection mode cannot be completed. - - -Use Cases ---------- - -As a user, I want to specify multiple polling detection modes for a given health -policy. The order of the polling detection modes used when creating the health -policy specifies the order of evaluation for the health checks. As a user, I also -want to be able to specify if a single detection mode failure triggers a node -rebuild or if all detection modes have to fail before a node is considered -unhealthy. - - -Proposed change -=============== - - -1. **Health Policy** - - Increment health policy version to 1.1 and implement the following schema: - -:: - - name: senlin.policy.health-1.1 - schema: - detection: - description: Policy aspect for node failure detection. - required: true - schema: - detection_modes: - description: List of node failure detection modes. - required: false - schema: - '*': - description: Node failure detection mode to try - required: false - schema: - options: - default: {} - required: false - schema: - poll_url: - default: '' - description: URL to poll for node status. See documentation for - valid expansion parameters. Only required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: String - updatable: false - poll_url_conn_error_as_unhealthy: - default: true - description: Whether to treat URL connection errors as an indication - of an unhealthy node. Only required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: Boolean - updatable: false - poll_url_healthy_response: - default: '' - description: String pattern in the poll URL response body that - indicates a healthy node. Required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: String - updatable: false - poll_url_retry_interval: - default: 3 - description: Number of seconds between URL polling retries before - a node is considered down. Required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: Integer - updatable: false - poll_url_retry_limit: - default: 3 - description: Number of times to retry URL polling when its return - body is missing POLL_URL_HEALTHY_RESPONSE string before a node - is considered down. Required when type is 'NODE_STATUS_POLL_URL'. 
- required: false - type: Integer - updatable: false - poll_url_ssl_verify: - default: true - description: Whether to verify SSL when calling URL to poll for - node status. Only required when type is 'NODE_STATUS_POLL_URL'. - required: false - type: Boolean - updatable: false - type: Map - updatable: false - type: - constraints: - - constraint: - - LIFECYCLE_EVENTS - - NODE_STATUS_POLLING - - NODE_STATUS_POLL_URL - type: AllowedValues - description: Type of node failure detection. - required: true - type: String - updatable: false - type: Map - updatable: false - type: List - updatable: false - interval: - default: 60 - description: Number of seconds between pollings. Only required when type is - 'NODE_STATUS_POLLING' or 'NODE_STATUS_POLL_URL'. - required: false - type: Integer - updatable: false - node_update_timeout: - default: 300 - description: Number of seconds since last node update to wait before checking - node health. - required: false - type: Integer - updatable: false - recovery_conditional: - constraints: - - constraint: - - ALL_FAILED - - ANY_FAILED - type: AllowedValues - default: ANY_FAILED - description: The conditional that determines when recovery should be performed - in case multiple detection modes are specified. 'ALL_FAILED' - means that all detection modes have to return failed health checks before - a node is recovered. 'ANY_FAILED' means that a failed health - check with a single detection mode triggers a node recovery. - required: false - type: String - updatable: false - type: Map - updatable: false - recovery: - description: Policy aspect for node failure recovery. - required: true - schema: - actions: - description: List of actions to try for node recovery. - required: false - schema: - '*': - description: Action to try for node recovery. - required: false - schema: - name: - constraints: - - constraint: - - REBOOT - - REBUILD - - RECREATE - type: AllowedValues - description: Name of action to execute. - required: true - type: String - updatable: false - params: - description: Parameters for the action - required: false - type: Map - updatable: false - type: Map - updatable: false - type: List - updatable: false - fencing: - description: List of services to be fenced. - required: false - schema: - '*': - constraints: - - constraint: - - COMPUTE - type: AllowedValues - description: Service to be fenced. - required: true - type: String - updatable: false - type: List - updatable: false - node_delete_timeout: - default: 20 - description: Number of seconds to wait for node deletion to finish and start - node creation for recreate recovery option. Required when type is 'NODE_STATUS_POLL_URL - and recovery action is RECREATE'. - required: false - type: Integer - updatable: false - node_force_recreate: - default: false - description: Whether to create node even if node deletion failed. Required - when type is 'NODE_STATUS_POLL_URL' and action recovery action is RECREATE. 
- required: false - type: Boolean - updatable: false - type: Map - updatable: false - - - -Alternatives ------------- - -None - - -Data model impact ------------------ - -None - -REST API impact ---------------- - -None - -Security impact ---------------- - -None - -Notifications impact --------------------- - -None - -Other end user impact ---------------------- - -None - -Performance Impact ------------------- - -None - -Other deployer impact ---------------------- - -None - -Developer impact ----------------- - -None - - -Implementation -============== - -Assignee(s) ------------ - -dtruong@blizzard.com - -Work Items ----------- - -None - -Dependencies -============ - -None - - -Testing -======= - -Unit tests and tempest tests are needed to test multiple detection modes. - -Documentation Impact -==================== - -End User Guide needs to be updated to describe how multiple detection modes can -be set. - -References -========== - -None - -History -======= - -None diff --git a/doc/specs/rejected/README.rst b/doc/specs/rejected/README.rst deleted file mode 100644 index 331271e32..000000000 --- a/doc/specs/rejected/README.rst +++ /dev/null @@ -1,2 +0,0 @@ -This directory holds the feature proposals that have been rejected. These -files are archived here for references. diff --git a/doc/specs/template.rst b/doc/specs/template.rst deleted file mode 100644 index c5f150ea1..000000000 --- a/doc/specs/template.rst +++ /dev/null @@ -1,363 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Example Spec - The title of your blueprint -========================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/senlin/+spec/example - -Introduction paragraph -- why are we doing anything? A single paragraph of -prose that operators can understand. The title and this first paragraph -should be used as the subject line and body of the commit message -respectively. - -Some notes about the senlin spec and blueprint process: - -* Not all blueprints need a spec. A blueprint is primarily used for tracking - a series of changes which could be easy to implement and easy to review. - A spec, on the other hand, usually warrants a discussion among the - developers (and reviewers) before work gets started. - -* The aim of this document is first to define the problem we need to solve, - and second agree the overall approach to solve that problem. - -* This is not intended to be extensive documentation for a new feature. - For example, there is no need to specify the exact configuration changes, - nor the exact details of any DB model changes. But you should still define - that such changes are required, and be clear on how that will affect - upgrades. - -* You should aim to get your spec approved before writing your code. - While you are free to write prototypes and code before getting your spec - approved, its possible that the outcome of the spec review process leads - you towards a fundamentally different solution than you first envisaged. - -* API changes are held to a much higher level of scrutiny. As soon as an API - change merges, we must assume it could be in production somewhere, and as - such, we then need to support that API change forever. To avoid getting that - wrong, we do want lots of details about API changes upfront. - -Some notes about using this template: - -* Please wrap text at 79 columns. 
- -* The filename in the git repository should match the launchpad URL, for - example a URL of: https://blueprints.launchpad.net/senlin/+spec/some-thing - should be named ``some-thing.rst``. - -* Please do not delete any of the *sections* in this template. If you have - nothing to say for a whole section, just write: None - -* For help with syntax, see http://www.sphinx-doc.org/en/stable/rest.html - -* To test out your formatting, build the docs using tox and see the generated - HTML file in doc/build/html/specs/ - -* If you would like to provide a diagram with your spec, ascii diagrams are - required. http://asciiflow.com/ is a very nice tool to assist with making - ascii diagrams. The reason for this is that the tool used to review specs is - based purely on plain text. Plain text will allow review to proceed without - having to look at additional files which can not be viewed in gerrit. It - will also allow inline feedback on the diagram itself. - -* If your specification proposes any changes to the Nova REST API such as - changing parameters which can be returned or accepted, or even the semantics - of what happens when a client calls into the API, then you should add the - ``APIImpact`` flag to the commit message. Specs and patches with the - ``APIImpact`` flag can be found with the following query: - - https://review.openstack.org/#/q/status:open+project:openstack/senlin+message:apiimpact,n,z - - -Problem description -=================== - -A detailed description of the problem. What problem is this spec addressing? - -Use Cases ---------- - -What use cases does this address? -What are the impacts on actors (developer, end user, deployer etc.)? - -Proposed change -=============== - -Detail here the changes you propose to make with the scope clearly defined. - -At this point, if you would like to just get feedback on if the problem and -proposed change fit in senlin, you can stop here and post this for review to -get early feedback. - -Alternatives ------------- - -What are the other ways we could do this? Why aren't we using those? - -This doesn't have to be a full literature review, but it should demonstrate -that thought has been put into why the proposed solution is an appropriate one. - -Data model impact ------------------ - -What are the new data objects and/or database schema changes, if any? - -What database migrations will accompany this change? - -How will the initial set of new data objects be generated? -For example if you need to consider the existing resources or modify other -existing data, describe how that will work. - -REST API impact ---------------- - -For each API added/changed, clarify the followings: - -* Method Specification - - - A description of what the method does, suitable for use in user doc; - - - Method type (POST/PUT/PATCH/GET/DELETE) - - - Normal http response code(s) - - - Expected error http response code(s) - - + A description for each possible error code should be included describing - semantic errors which can cause it such as inconsistent parameters - supplied to the method, or when an object is not in an appropriate state - for the request to succeed. Errors caused by syntactic problems covered - by the JSON schema definition do not need to be included. - - - URL for the resource - - + URL should not include underscores, and use hyphens instead. 
- - - Parameters which can be passed via the URL - - - Request body definition in JSON schema, if any, with sample - - * Field names should use snake_case style, not CamelCase - - - Response body definition in JSON schema, if any, with sample - - * Field names should use snake_case style, not CamelCase - -* Policy changes to be introduced - - - Other things a deployer needs to think about when defining their policy. - -Note that the request/response schema should be defined as restrictively as -possible. Parameters which are required should be marked as such and only -under exceptional circumstances should additional parameters which are not -defined in the schema be permitted. - -Reuse of existing predefined parameter types such as regexps for passwords and -user defined names is highly encouraged. - -Security impact ---------------- - -Describe any potential security impact on the system. Some of the items to -consider include: - -* Does this change touch sensitive data such as tokens, keys, or user data? - -* Does this change alter the API in a way that may impact security, such as - a new way to access sensitive information or a new way to login? - -* Does this change involve cryptography or hashing? - -* Does this change require the use of sudo or any elevated privileges? - -* Does this change involve using or parsing user-provided data? This could - be directly at the API level or indirectly such as changes to a cache layer. - -* Can this change enable a resource exhaustion attack, such as allowing a - single API interaction to consume significant server resources? Examples - of this include launching subprocesses for each connection, or entity - expansion attacks in XML. - -For more detailed guidance, please see the OpenStack Security Guidelines as -a reference (https://wiki.openstack.org/wiki/Security/Guidelines). These -guidelines are a work in progress and are designed to help you identify -security best practices. For further information, feel free to reach out -to the OpenStack Security Group at openstack-security@lists.openstack.org. - -Notifications impact --------------------- - -Please specify any changes to notifications, including: - -- adding new notification, -- changing an existing notification, or -- removing a notification. - -Other end user impact ---------------------- - -Aside from the API, are there other ways a user will interact with this -feature? - -* Does this change have an impact on python-senlinclient? - -* What does the user interface there look like? - -Performance Impact ------------------- - -Describe any potential performance impact on the system, for example -how often will new code be called, and is there a major change to the calling -pattern of existing code. - -Examples of things to consider here include: - -* A periodic task manipulating a cluster node implies workload which will be - multiplied by the size of a cluster. - -* Any code interacting with backend services (e.g. nova or heat) may introduce - some latency which linear to the size of a cluster. - -* A small change in a utility function or a commonly used decorator can have a - large impacts on performance. - -* Calls which result in a database queries can have a profound impact on - performance when called in critical sections of the code. - -* Will the change include any locking, and if so what considerations are there - on holding the lock? 
-
-Other deployer impact
----------------------
-
-Other impacts on how you deploy and configure OpenStack, such as:
-
-* What config options are being added? Should they be more generic than
-  proposed? Will the default values work well in real deployments?
-
-* Is this a change that takes immediate effect after it's merged, or is it
-  something that has to be explicitly enabled?
-
-* If this change involves a new binary, how would it be deployed?
-
-* Please state anything that those doing continuous deployment, or those
-  upgrading from the previous release, need to be aware of. Also describe
-  any plans to deprecate configuration values or features.
-
-Developer impact
-----------------
-
-Discuss things that will affect other developers, such as:
-
-* If the blueprint proposes a change to the driver API, discussion of how
-  other drivers would implement the feature is required.
-
-* Does this change have an impact on openstacksdk?
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Who is leading the writing of the code? Or is this a blueprint where you're
-throwing it out there to see who picks it up?
-
-If more than one person is working on the implementation, please designate the
-primary author and contact.
-
-Primary assignee:
-
-
-Other contributors:
-
-
-Work Items
-----------
-
-Work items or tasks -- break the feature up into the things that need to be
-done to implement it. Those parts might end up being done by different people,
-but we're mostly trying to understand the timeline for implementation.
-
-
-Dependencies
-============
-
-* Include specific references to specs and/or blueprints, or in other
-  projects, that this one either depends on or is related to.
-
-* If this requires functionality of another project that is not currently
-  used by senlin, document that fact.
-
-* Does this feature require any new library dependencies or code otherwise
-  not included in OpenStack? Or does it depend on a specific version of a
-  library?
-
-
-Testing
-=======
-
-Please discuss how the change will be tested, especially what tempest tests
-will be added. It is assumed that unit test coverage will be added so that
-doesn't need to be mentioned explicitly, but discussion of why you think
-unit tests are sufficient and we don't need to add more tempest tests would
-need to be included.
-
-Please discuss the important scenarios needed to test here, as well as
-specific edge cases we should be ensuring work correctly. For each
-scenario please specify if this requires a full openstack environment, or
-can be simulated inside the senlin tree.
-
-
-Documentation Impact
-====================
-
-Which audiences are affected most by this change, and which documentation
-titles on docs.openstack.org should be updated because of this change?
-
-Don't repeat details discussed above, but reference them here in the context of
-documentation for multiple audiences. For example, the Operations Guide targets
-cloud operators, and the End User Guide would need to be updated if the change
-offers a new feature available through the CLI or dashboard. If a config option
-changes or is deprecated, note here that the documentation needs to be updated
-to reflect this specification's change.
-
-References
-==========
-
-Please add any useful references here. You are not required to have any
-reference. Moreover, this specification should still make sense when your
-references are unavailable. 
Examples of what you could include are:
-
-* Links to mailing list or IRC discussions
-
-* Links to notes from a summit session
-
-* Links to relevant research, if appropriate
-
-* Related specifications as appropriate
-
-* Anything else you feel it is worthwhile to refer to
-
-
-History
-=======
-
-Optional section intended to be used each time the spec is updated to
-describe a new design, API change or database schema update. Useful to let
-the reader understand what has happened over time.
-
-.. list-table:: Revisions
-   :header-rows: 1
-
-   * - Release Name
-     - Description
-   * - Ocata
-     - Introduced
diff --git a/doc/specs/workflow-recover.rst b/doc/specs/workflow-recover.rst
deleted file mode 100644
index 7616787f3..000000000
--- a/doc/specs/workflow-recover.rst
+++ /dev/null
@@ -1,172 +0,0 @@
-..
- This work is licensed under a Creative Commons Attribution 3.0 Unported
- License.
-
- http://creativecommons.org/licenses/by/3.0/legalcode
-
-==========================================
-Support Workflow Service as Recover Action
-==========================================
-
-
-Nowadays, Senlin supports many different actions for the purpose of cluster
-management. For the auto-healing use case in particular, Senlin provides
-check and recover operations whose behavior can be customized through the
-health policy, where three kinds of detection types can be chosen:
-NODE_STATUS_POLLING, LB_STATUS_POLLING and VM_LIFECYCLE_EVENTS. Once a
-failure of the given type is detected, the recover action can be executed
-automatically or manually. In the health policy, users can also define a
-list of actions under the recovery category, which are applied in order to
-a failed node.
-
-Some simple recover actions, like rebuild or recreate, can be embedded in
-Senlin itself. But some complex actions are a chain of simple actions. For
-example, evacuating a VM server requires verifying that the targeted node
-can be evacuated, executing the evacuation, and often confirming whether
-the action succeeded. To support such cases, this spec aims to extend
-Senlin to integrate with the Mistral workflow service so that user-defined
-workflows can be triggered as recover options.
-
-Problem description
-===================
-
-This spec extends Senlin to support Mistral workflows for more complex and
-customizable recover actions.
-
-Use Cases
----------
-
-One typical use case is to allow users to introduce their own or existing
-Mistral workflows as an option for the recover action, or as special
-processing before or after a given recover action.
-
-Proposed change
-===============
-
-The proposed change includes the following parts:
-
-* driver: to add Mistral support to Senlin.
-* profile: to add workflow support as one of the recover actions (a sketch
-  follows this list).
-* cluster/node_action: to support a chain of actions defined as recovery
-  behaviour.
-* health policy: The health policy spec will be changed to support workflow
-  as a recover action and to include the parameters needed to execute the
-  workflow. In the health policy, the workflow can also be executed before
-  or after some defined recover action. Below is an example:
-
-  ::
-
-    recovery:
-      actions:
-        - name: REBUILD
-        - name: WORKFLOW
-          params:
-            workflow_name: node_migration
-            inputs:
-              host: Target_host
-
-* example: to add sample workflow definitions and a health policy that let
-  Senlin users create an end-to-end story.
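-
-A minimal sketch of how the profile layer could dispatch a WORKFLOW recover
-action to the new Mistral driver is shown below. The helper names used here
-(workflow, execution_create, wait_for_execution, _do_builtin_recover) are
-illustrative assumptions, not a final driver API::
-
-    def do_recover(self, obj, **options):
-        # Walk the ordered action list defined in the health policy.
-        for action in options.get('recovery', {}).get('actions', []):
-            if action['name'] == 'WORKFLOW':
-                params = action.get('params', {})
-                # Hypothetical handle to the new Mistral driver.
-                wf = self.workflow(obj)
-                # Trigger the user-defined workflow and wait for the
-                # result so that success or failure can be confirmed.
-                execution = wf.execution_create(params['workflow_name'],
-                                                params.get('inputs', {}))
-                wf.wait_for_execution(execution)
-            else:
-                # Fall back to the built-in recover operations
-                # (REBOOT, REBUILD, RECREATE).
-                self._do_builtin_recover(obj, action)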
-
-Alternatives
-------------
-
-None
-
-Data model impact
------------------
-
-None
-
-REST API impact
----------------
-
-None
-
-Security impact
----------------
-
-None
-
-Notifications impact
---------------------
-
-None in the first version
-
-Other end user impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other deployer impact
----------------------
-
-If Mistral is installed in the same environment and the users want to
-leverage its workflow functions, this spec provides the support needed to
-integrate Senlin and Mistral for the auto-healing purpose.
-
-One thing worth attention is that debugging and troubleshooting of the user
-workflow is not in the scope of this integration. This spec aims to provide
-a channel for users to bring their own trusted workflows into the Senlin
-auto-healing loop and have them work together with all the embedded actions.
-
-Developer impact
-----------------
-
-None
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-lxinhui@vmware.com
-
-Work Items
-----------
-
-The primary work items in Senlin will focus on adding a new driver for
-Mistral and implementing do_recover in the profile.
-
-Dependencies
-============
-
-* Mistral: need to migrate the current APIs to versioned APIs.
-
-* Openstacksdk: need to support the workflow service.
-
-
-Testing
-=======
-
-Unit tests will be provided. An end-to-end test will be provided as an
-example for Senlin users.
-
-
-Documentation Impact
-====================
-
-None
-
-References
-==========
-
-[1] Mistral patch about API migration:
-    https://review.openstack.org/414755
-[2] Openstacksdk patch about the support of the Mistral service:
-    https://review.openstack.org/414919
-
-History
-=======
-
-None
-
-.. list-table:: Revisions
-   :header-rows: 1
-
-   * - Release Name
-     - Description
-   * - Ocata
-     - Introduced
diff --git a/etc/senlin/README-senlin.conf.txt b/etc/senlin/README-senlin.conf.txt
deleted file mode 100644
index 44bb0d68d..000000000
--- a/etc/senlin/README-senlin.conf.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-To generate the sample senlin.conf file, run the following
-command from the top level of the senlin directory:
-
-tox -egenconfig
diff --git a/etc/senlin/api-paste.ini b/etc/senlin/api-paste.ini
deleted file mode 100644
index e3a5327c1..000000000
--- a/etc/senlin/api-paste.ini
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# senlin-api pipeline
-[pipeline:senlin-api]
-pipeline = cors http_proxy_to_wsgi request_id faultwrap versionnegotiation osprofiler webhook authtoken context trust apiv1app
-
-[app:apiv1app]
-paste.app_factory = senlin.api.common.wsgi:app_factory
-senlin.app_factory = senlin.api.openstack.v1.router:API
-
-# Middleware to set x-openstack-request-id in http response header
-[filter:request_id]
-paste.filter_factory = oslo_middleware.request_id:RequestId.factory
-
-[filter:faultwrap]
-paste.filter_factory = senlin.api.common.wsgi:filter_factory
-senlin.filter_factory = senlin.api.middleware:fault_filter
-
-[filter:context]
-paste.filter_factory = senlin.api.common.wsgi:filter_factory
-senlin.filter_factory = senlin.api.middleware:context_filter
-oslo_config_project = senlin
-
-[filter:versionnegotiation]
-paste.filter_factory = senlin.api.common.wsgi:filter_factory
-senlin.filter_factory = senlin.api.middleware:version_filter
-
-[filter:trust]
-paste.filter_factory = senlin.api.common.wsgi:filter_factory
-senlin.filter_factory = senlin.api.middleware:trust_filter
-
-[filter:webhook]
-paste.filter_factory = 
senlin.api.common.wsgi:filter_factory
-senlin.filter_factory = senlin.api.middleware:webhook_filter
-
-[filter:http_proxy_to_wsgi]
-paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
-oslo_config_project = senlin
-
-# Auth middleware that validates token against keystone
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:osprofiler]
-paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = senlin
diff --git a/examples/policies/WIP/batching_1_1_0.yaml b/examples/policies/WIP/batching_1_1_0.yaml
deleted file mode 100644
index 70c43b7cf..000000000
--- a/examples/policies/WIP/batching_1_1_0.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# Sample batching policy
-type: senlin.policy.batching
-version: 1.0
-description: A policy for generating batches for cluster operations.
-properties:
-  # Min number of nodes in service when doing cluster-wide operations
-  min_in_service: 1
-
-  # Max number of nodes that can be operated simultaneously
-  max_batch_size: 1
-
-  # Number of seconds between batches
-  pause_time: 0
diff --git a/examples/policies/WIP/health_policy_lb.yaml b/examples/policies/WIP/health_policy_lb.yaml
deleted file mode 100644
index 26c227dd1..000000000
--- a/examples/policies/WIP/health_policy_lb.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-# Sample health policy based on monitoring using LBaaS service
-type: senlin.policy.health
-version: 1.0
-description: A policy for maintaining node health from a cluster.
-properties:
-  detection:
-    # Type for health checking, valid values include:
-    # NODE_STATUS_POLLING, LB_STATUS_POLLING, VM_EVENT_LISTENING
-    type: LB_STATUS_POLLING
-
-    # Detailed specification for the checking type
-    options:
-      # Min time in seconds between regular connections to the member
-      delay: 5
-
-      # Max time in seconds for a monitor to wait for a connection
-      # to establish before it times out
-      timeout: 10
-
-      # Predefined health monitor types, valid values include one of:
-      # PING, TCP, HTTP, HTTPS
-      type: HTTP
-
-      # Number of permissible connection failures before changing the
-      # node status to INACTIVE
-      max_retries: 3
-
-      # HTTP method used for requests by the monitor of type HTTP
-      http_method: GET
-
-      # List of HTTP status codes expected in response from a member
-      # to declare it healthy
-      expected_codes: [200]
-
-      # HTTP path used in HTTP request by monitor for health testing
-      url_path: /health_status
-
-  recovery:
-    # List of actions that can be retried on a failed node
-    actions:
-      - REBOOT
-      - REBUILD
-      - MIGRATE
-      - EVACUATE
-      - RECREATE
-
-    # List of services that are to be fenced
-    fencing:
-      - COMPUTE
-      - STORAGE
-      - NETWORK
diff --git a/examples/policies/WIP/lb_policy_aws.spec b/examples/policies/WIP/lb_policy_aws.spec
deleted file mode 100644
index 78d53bfdb..000000000
--- a/examples/policies/WIP/lb_policy_aws.spec
+++ /dev/null
@@ -1,21 +0,0 @@
-# Sample load-balancing policy modeled after the AWS ELB load-balancer
-
-# TODO(Qiming): Rework this based on ELB spec
-AvailabilityZones: []
-Instances: []
-Listeners:
-  - InstancePort: 80
-    LoadBalancerPort: 80
-    Protocol: HTTP
-    SSLCertificateId: MyCertificate
-    PolicyNames:
-      - PolicyA
-      - PolicyB
-AppCookieStickinessPolicy:
-  - What
-LBCookieStickinessPolicy:
-  - What
-SecurityGroups:
-  - ssh_group
-Subnets:
-  - private_sub_net_01
diff --git a/examples/policies/affinity_policy.yaml b/examples/policies/affinity_policy.yaml
deleted file mode 
100644 index cfa838d6c..000000000 --- a/examples/policies/affinity_policy.yaml +++ /dev/null @@ -1,8 +0,0 @@ -type: senlin.policy.affinity -version: 1.0 -properties: - servergroup: - name: web_servers - policies: anti-affinity - availability_zone: az01 - enable_drs_extension: false diff --git a/examples/policies/batch_policy.yaml b/examples/policies/batch_policy.yaml deleted file mode 100644 index dd2b9e594..000000000 --- a/examples/policies/batch_policy.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Sample batch policy that can be attached to a cluster -type: senlin.policy.batch -version: 1.0 -properties: - # Minimum number of nodes that should remain in service when - # performing actions like CLUSTER_UPDATE. - min_in_service: 1 - - # Maximum number of nodes that can be processed at the - # same time. - max_batch_size: 2 - - # Number of seconds between two consecutive batches of - # operations. A value of 0 means no pause time. - pause_time: 3 diff --git a/examples/policies/deletion_policy.yaml b/examples/policies/deletion_policy.yaml deleted file mode 100644 index 464162e35..000000000 --- a/examples/policies/deletion_policy.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Sample deletion policy that can be attached to a cluster. -type: senlin.policy.deletion -version: 1.0 -description: A policy for choosing victim node(s) from a cluster for deletion. -properties: - # The valid values include: - # OLDEST_FIRST, OLDEST_PROFILE_FIRST, YOUNGEST_FIRST, RANDOM - criteria: OLDEST_FIRST - - # Whether deleted node should be destroyed - destroy_after_deletion: True - - # Length in number of seconds before the actual deletion happens - # This param buys an instance some time before deletion - grace_period: 60 - - # Whether the deletion will reduce the desired capacity of - # the cluster as well - reduce_desired_capacity: False diff --git a/examples/policies/deletion_policy_lifecycle_hook.yaml b/examples/policies/deletion_policy_lifecycle_hook.yaml deleted file mode 100644 index 4f5eea948..000000000 --- a/examples/policies/deletion_policy_lifecycle_hook.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Sample deletion policy that can be attached to a cluster. -type: senlin.policy.deletion -version: 1.1 -description: A policy for choosing victim node(s) from a cluster for deletion. -properties: - hooks: - # type of lifecycle hook - type: zaqar - params: - # Name of zaqar queue to receive lifecycle hook message - queue: zaqar_queue_name - # Length in number of seconds before the actual deletion happens - timeout: 180 - diff --git a/examples/policies/health_policy_event.yaml b/examples/policies/health_policy_event.yaml deleted file mode 100644 index 4e0bcf27e..000000000 --- a/examples/policies/health_policy_event.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Sample health policy based on VM lifecycle events -type: senlin.policy.health -version: 1.1 -description: A policy for maintaining node health from a cluster. -properties: - detection: - detection_modes: - # Type for health checking, valid values include: - # NODE_STATUS_POLLING, NODE_STATUS_POLL_URL, LIFECYCLE_EVENTS - - type: LIFECYCLE_EVENTS - - recovery: - # Action that can be retried on a failed node, will improve to - # support multiple actions in the future. 
Valid values include: - # REBOOT, REBUILD, RECREATE - actions: - - name: RECREATE diff --git a/examples/policies/health_policy_poll.yaml b/examples/policies/health_policy_poll.yaml deleted file mode 100644 index 85b207330..000000000 --- a/examples/policies/health_policy_poll.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Sample health policy based on node health checking -type: senlin.policy.health -version: 1.1 -description: A policy for maintaining node health from a cluster. -properties: - detection: - # Number of seconds between two adjacent checking - interval: 600 - - detection_modes: - # Type for health checking, valid values include: - # NODE_STATUS_POLLING, NODE_STATUS_POLL_URL, LIFECYCLE_EVENTS - - type: NODE_STATUS_POLLING - - recovery: - # Action that can be retried on a failed node, will improve to - # support multiple actions in the future. Valid values include: - # REBOOT, REBUILD, RECREATE - actions: - - name: RECREATE diff --git a/examples/policies/health_policy_poll_url.yaml b/examples/policies/health_policy_poll_url.yaml deleted file mode 100644 index 7ff91fd6d..000000000 --- a/examples/policies/health_policy_poll_url.yaml +++ /dev/null @@ -1,19 +0,0 @@ -type: senlin.policy.health -version: 1.1 -description: A policy for maintaining node health by polling a URL -properties: - detection: - interval: 120 - node_update_timeout: 240 - detection_modes: - - type: NODE_STATUS_POLL_URL - options: - poll_url: "http://myhealthservice/health/node/{nodename}" - poll_url_healthy_response: "passing" - poll_url_retry_limit: 3 - poll_url_retry_interval: 2 - recovery: - actions: - - name: RECREATE - node_delete_timeout: 90 - node_force_recreate: True diff --git a/examples/policies/lb_policy.yaml b/examples/policies/lb_policy.yaml deleted file mode 100644 index 06e1bcfbb..000000000 --- a/examples/policies/lb_policy.yaml +++ /dev/null @@ -1,80 +0,0 @@ -# Load-balancing policy spec using Neutron LBaaS service -type: senlin.policy.loadbalance -version: 1.1 -description: A policy for load-balancing the nodes in a cluster. -properties: - pool: - # Protocol used for load balancing - protocol: HTTP - - # Port on which servers are running on the members - protocol_port: 80 - - # Name or ID of subnet for the port on which members can be - # connected. - subnet: private-subnet - - # Valid values include: ROUND_ROBIN, LEAST_CONNECTIONS, SOURCE_IP - lb_method: ROUND_ROBIN - - session_persistence: - # type of session persistence, valid values include: - # SOURCE_IP, HTTP_COOKIE, APP_COOKIE, NONE - type: SOURCE_IP - # Name of cookie if type set to APP_COOKIE - cookie_name: whatever - - # ID of pool for the cluster on which nodes can be connected. - # id: - - vip: - # Name or ID of Subnet on which VIP address will be allocated - subnet: public-subnet - - # IP address of the VIP - # address:
- - # Max #connections per second allowed for this VIP - connection_limit: 500 - - # Protocol used for VIP - protocol: HTTP - - # TCP port to listen on - protocol_port: 80 - - health_monitor: - # The type of probe sent by the load balancer to verify the member state, - # can be PING, TCP, HTTP, or HTTPS. - type: 'PING' - - # The amount of time, in milliseconds, between sending probes to members. - delay: 10000 - - # The maximum time in milliseconds that a monitor waits to connect before - # it times out. This value must be less than the delay value. - timeout: 5000 - - # The number of allowed connection failures before changing the status - # of the member to INACTIVE. A valid value is from 1 to 10. - max_retries: 4 - - # The HTTP method that the monitor uses for requests. - http_method: 'GET' - - # The HTTP path of the request sent by the monitor to test the health of - # a member. A string value that must begin with the forward slash '/'. - url_path: '/index.html' - - # Expected HTTP codes for a passing HTTP(S) monitor. - expected_codes: '200, 202' - - # ID of the health manager for the loadbalancer. - # id: - - # Time in second to wait for loadbalancer to become ready before and after - # senlin requests lbaas V2 service for lb operations. - lb_status_timeout: 300 - - # Name or ID of loadbalancer for the cluster on which nodes can be connected. - # loadbalancer: diff --git a/examples/policies/placement_region.yaml b/examples/policies/placement_region.yaml deleted file mode 100644 index 823c0d074..000000000 --- a/examples/policies/placement_region.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# Sample placement policy for cross-region placement -type: senlin.policy.region_placement -version: 1.0 -description: A policy for node placement across regions -properties: - regions: - - name: RegionOne - weight: 100 - cap: 150 - - name: RegionTwo - weight: 100 - cap: 200 diff --git a/examples/policies/placement_zone.yaml b/examples/policies/placement_zone.yaml deleted file mode 100644 index 194ef01cb..000000000 --- a/examples/policies/placement_zone.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Sample placement policy for cross-availability-zone placement -type: senlin.policy.zone_placement -version: 1.0 -description: A policy for node placement across availability zones -properties: - zones: - - name: zone1 - weight: 100 - - name: zone2 - weight: 100 diff --git a/examples/policies/scaling_policy.yaml b/examples/policies/scaling_policy.yaml deleted file mode 100644 index 3e87a1521..000000000 --- a/examples/policies/scaling_policy.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Sample scaling policy that can be attached to a cluster -type: senlin.policy.scaling -version: 1.0 -properties: - event: CLUSTER_SCALE_IN - adjustment: - # Adjustment type, valid values include: - # EXACT_CAPACITY, CHANGE_IN_CAPACITY, CHANGE_IN_PERCENTAGE - type: CHANGE_IN_CAPACITY - - # A number that will be interpreted based on the type setting. - number: 1 - - # When type is set CHANGE_IN_PERCENTAGE, min_step specifies - # that the cluster size will be changed by at least the number - # of nodes specified here. - min_step: 1 - - # When scaling operation will break the size limitation of - # cluster, whether to do best effort scaling, e.g. decrease - # cluster size to min_size or increase cluster size to max_size - # Default False means reject scaling request directly. - best_effort: True - - # Number of seconds before allowing the cluster to be resized again. 
- cooldown: 120 diff --git a/examples/profiles/README.rst b/examples/profiles/README.rst deleted file mode 100644 index 15d06e5be..000000000 --- a/examples/profiles/README.rst +++ /dev/null @@ -1,28 +0,0 @@ -How To Use the Sample Spec File -=============================== - -This directory contains sample spec files that can be used to create a Senlin -profile using :command:`openstack cluster profile create` command, for example: - -To create an os.nova.server profile:: - - $ cd ./nova_server - $ openstack cluster profile create --spec-file cirros_basic.yaml my_server - -To create an os.heat.stack profile:: - - $ cd ./heat_stack/nova_server - $ openstack cluster profile create --spec-file heat_stack_nova_server.yaml my_stack - -To create a container.dockerinc.docker profile:: - - $ cd ./docker_container - $ openstack cluster profile create --spec-file docker_basic.yaml my_docker - -To get help on the command line options for creating profiles:: - - $ openstack help cluster profile create - -To show the profile created:: - - $ openstack cluster profile show diff --git a/examples/profiles/docker_container/docker_basic.yaml b/examples/profiles/docker_container/docker_basic.yaml deleted file mode 100644 index c19171c05..000000000 --- a/examples/profiles/docker_container/docker_basic.yaml +++ /dev/null @@ -1,11 +0,0 @@ -type: container.dockerinc.docker -version: 1.0 -properties: - #name: docker_container - image: hello-world - command: '/bin/sleep 30' - host_node: 58736d36-271a-47e7-816d-fb7927a7cd95 - host_cluster: b3283baf-c199-49fc-a5b7-f2b301b15a3d - port: 2375 - context: - region_name: RegionOne diff --git a/examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml b/examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml deleted file mode 100644 index 19e1c4064..000000000 --- a/examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml +++ /dev/null @@ -1,9 +0,0 @@ -type: os.heat.stack -version: 1.0 -properties: - name: nova_server_stack - template: nova_server_template.yaml - parameters: - server_name: my_cirros_server - context: - region_name: RegionOne diff --git a/examples/profiles/heat_stack/nova_server/nova_server_template.yaml b/examples/profiles/heat_stack/nova_server/nova_server_template.yaml deleted file mode 100644 index fadc24aff..000000000 --- a/examples/profiles/heat_stack/nova_server/nova_server_template.yaml +++ /dev/null @@ -1,56 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - A HOT template that holds a VM instance with a Neutron port created in - given private network and a floatingIP created in given external network. - -parameters: - server_name: - type: string - description: Name for the instance to be created - default: my_server - flavor: - type: string - description: Flavor for the instance to be created - default: m1.tiny - image: - type: string - description: Name or ID of the image to use for the instance. 
- default: cirros-0.3.5-x86_64-disk - public_net: - type: string - description: ID or name of public network where floating IP to be created - default: public - private_net: - type: string - description: ID or name of private network into which servers get deployed - default: private - -resources: - my_server: - type: OS::Nova::Server - properties: - name: { get_param: server_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: - - port: { get_resource: server_port } - - server_port: - type: OS::Neutron::Port - properties: - network: { get_param: private_net } - - server_floating_ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: public_net } - port_id: { get_resource: server_port } - -outputs: - server_private_ip: - description: IP address of my_server in private network - value: { get_attr: [ server_port, fixed_ips, 0, ip_address ] } - server_public_ip: - description: Floating IP address of my_server in public network - value: { get_attr: [ server_floating_ip, floating_ip_address ] } diff --git a/examples/profiles/heat_stack/random_string/heat_stack_random_string.yaml b/examples/profiles/heat_stack/random_string/heat_stack_random_string.yaml deleted file mode 100644 index d6cc8880b..000000000 --- a/examples/profiles/heat_stack/random_string/heat_stack_random_string.yaml +++ /dev/null @@ -1,7 +0,0 @@ -type: os.heat.stack -version: 1.0 -properties: - name: random_string_stack - template: random_string_template.yaml - context: - region_name: RegionOne diff --git a/examples/profiles/heat_stack/random_string/random_string_template.yaml b/examples/profiles/heat_stack/random_string/random_string_template.yaml deleted file mode 100644 index d146a7498..000000000 --- a/examples/profiles/heat_stack/random_string/random_string_template.yaml +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - str_length: - type: number - default: 64 -resources: - random: - type: OS::Heat::RandomString - properties: - length: {get_param: str_length} -outputs: - result: - value: {get_attr: [random, value]} diff --git a/examples/profiles/nova_server/cirros_basic.yaml b/examples/profiles/nova_server/cirros_basic.yaml deleted file mode 100644 index 125353b53..000000000 --- a/examples/profiles/nova_server/cirros_basic.yaml +++ /dev/null @@ -1,14 +0,0 @@ -type: os.nova.server -version: 1.0 -properties: - name: cirros_server - flavor: 1 - image: "cirros-0.4.0-x86_64-disk" - key_name: oskey - networks: - - network: private - metadata: - test_key: test_value - user_data: | - #!/bin/sh - echo 'hello, world' > /tmp/test_file diff --git a/install.sh b/install.sh deleted file mode 100755 index 4b3b99b7d..000000000 --- a/install.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/bash - -if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" >&2 - exit 1 -fi - -# Install prefix for config files (e.g. "/usr/local"). -# Leave empty to install into /etc -CONF_PREFIX="" -LOG_DIR=/var/log/senlin - - -install -d $LOG_DIR - -detect_rabbit() { - PKG_CMD="rpm -q" - RABBIT_PKG="rabbitmq-server" - QPID_PKG="qpid-cpp-server" - - # Detect OS type - # Ubuntu has an lsb_release command which allows us to detect if it is Ubuntu - if lsb_release -i 2>/dev/null | grep -iq ubuntu - then - PKG_CMD="dpkg -s" - QPID_PKG="qpidd" - fi - if $PKG_CMD $RABBIT_PKG > /dev/null 2>&1 - then - if ! 
$PKG_CMD $QPID_PKG > /dev/null 2>&1 - then - return 0 - fi - fi - return 1 -} - -# Determine if the given option is present in the INI file -# ini_has_option config-file section option -function ini_has_option() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - [ -n "$line" ] -} - -# Set an option in an INI file - # iniset config-file section option value -function iniset() { - local file=$1 - local section=$2 - local option=$3 - local value=$4 - if ! grep -q "^\[$section\]" "$file"; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - fi - if ! ini_has_option "$file" "$section" "$option"; then - # Add it - sed -i -e "/^\[$section\]/ a\\ -$option = $value -" "$file" - else - # Replace it - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" - fi -} - -basic_configuration() { - conf_path=$1 - if echo $conf_path | grep ".conf$" >/dev/null 2>&1 - then - iniset $conf_path DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` - iniset $conf_path database connection "mysql+pymysql://senlin:senlin@localhost/senlin?charset=utf8" - - BRIDGE_IP=127.0.0.1 - - if detect_rabbit - then - echo "rabbitmq detected, configuring $conf_path for rabbit" >&2 - iniset $conf_path DEFAULT rpc_backend kombu - iniset $conf_path oslo_messaging_rabbit rabbit_password guest - else - echo "qpid detected, configuring $conf_path for qpid" >&2 - iniset $conf_path DEFAULT rpc_backend qpid - fi - fi -} - -install_dir() { - local dir=$1 - local prefix=$2 - - for fn in $(ls $dir); do - f=$dir/$fn - target=$prefix/$f - if [ $fn = 'senlin.conf.sample' ]; then - target=$prefix/$dir/senlin.conf - fi - if [ -d $f ]; then - [ -d $target ] || install -d $target - install_dir $f $prefix - elif [ -f $target ]; then - echo "NOT replacing existing config file $target" >&2 - diff -u $target $f - else - echo "Installing $fn in $prefix/$dir" >&2 - install -m 664 $f $target - if [ $fn = 'senlin.conf.sample' ]; then - basic_configuration $target - fi - fi - done -} - -install_dir etc $CONF_PREFIX - -python setup.py install >/dev/null -rm -rf build senlin.egg-info diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/notes/Switch-to-alembic-migrations-f442d0b58c3f13a6.yaml b/releasenotes/notes/Switch-to-alembic-migrations-f442d0b58c3f13a6.yaml deleted file mode 100644 index 23cd57c2c..000000000 --- a/releasenotes/notes/Switch-to-alembic-migrations-f442d0b58c3f13a6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - | - Senlin will now use Alembic migrations for database schema updates. diff --git a/releasenotes/notes/Updated-for-SQLAlchemy-2.x-ee6831e5a95d3658.yaml b/releasenotes/notes/Updated-for-SQLAlchemy-2.x-ee6831e5a95d3658.yaml deleted file mode 100644 index 82d8b96f9..000000000 --- a/releasenotes/notes/Updated-for-SQLAlchemy-2.x-ee6831e5a95d3658.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixed compatibility issues with SQLAlchemy 2.x. diff --git a/releasenotes/notes/acess-control-admin-project-762c8e91e8875738.yaml b/releasenotes/notes/acess-control-admin-project-762c8e91e8875738.yaml deleted file mode 100644 index 648378db3..000000000 --- a/releasenotes/notes/acess-control-admin-project-762c8e91e8875738.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Admin users can now see details of any cluster profile.
diff --git a/releasenotes/notes/action-policy-optimization-06ea45eb3dcbe33a.yaml b/releasenotes/notes/action-policy-optimization-06ea45eb3dcbe33a.yaml deleted file mode 100644 index 6861ebbd2..000000000 --- a/releasenotes/notes/action-policy-optimization-06ea45eb3dcbe33a.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - The retrieval of some resources such as actions and policies is optimized - to avoid object instantiation. diff --git a/releasenotes/notes/action-purge-11db5d8018b8389a.yaml b/releasenotes/notes/action-purge-11db5d8018b8389a.yaml deleted file mode 100644 index 66dfec49a..000000000 --- a/releasenotes/notes/action-purge-11db5d8018b8389a.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - An ``action_purge`` subcommand is added to the ``senlin-manage`` tool for purging actions - from the actions table. diff --git a/releasenotes/notes/action-update-api-fc51b1582c0b5902.yaml b/releasenotes/notes/action-update-api-fc51b1582c0b5902.yaml deleted file mode 100644 index d20ff1d99..000000000 --- a/releasenotes/notes/action-update-api-fc51b1582c0b5902.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - [`blueprint action-update `_] - A new action update API is added to allow the action status to be updated. - The only valid status value for update is CANCELLED. - diff --git a/releasenotes/notes/add-action-filter-40e775a26082f780.yaml b/releasenotes/notes/add-action-filter-40e775a26082f780.yaml deleted file mode 100644 index 84f3fd02d..000000000 --- a/releasenotes/notes/add-action-filter-40e775a26082f780.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - Add cluster_id as a parameter to the action query APIs. - This allows filtering the results returned from the API instead of - receiving a large number of actions. diff --git a/releasenotes/notes/add-availability_zone-option-to-loadbalancer-74b512fb0c138bfe.yaml b/releasenotes/notes/add-availability_zone-option-to-loadbalancer-74b512fb0c138bfe.yaml deleted file mode 100644 index 5c2d373fa..000000000 --- a/releasenotes/notes/add-availability_zone-option-to-loadbalancer-74b512fb0c138bfe.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Add availability_zone option for loadbalancers. This is supported by - Octavia starting in the Ussuri release. \ No newline at end of file diff --git a/releasenotes/notes/affinity-policy-fix-72ae92dc8ffcff00.yaml b/releasenotes/notes/affinity-policy-fix-72ae92dc8ffcff00.yaml deleted file mode 100644 index c4fe99cb9..000000000 --- a/releasenotes/notes/affinity-policy-fix-72ae92dc8ffcff00.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed a bug in the affinity policy where the calls to the nova driver were wrong. diff --git a/releasenotes/notes/api-ref-fixes-19bc963430c32ecf.yaml b/releasenotes/notes/api-ref-fixes-19bc963430c32ecf.yaml deleted file mode 100644 index 5403a43d4..000000000 --- a/releasenotes/notes/api-ref-fixes-19bc963430c32ecf.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The new API documentation includes fixes to headers like 'location' and - 'OpenStack-Request-Id', and to responses during version negotiation. diff --git a/releasenotes/notes/az-info-9344b8d54c0b2665.yaml b/releasenotes/notes/az-info-9344b8d54c0b2665.yaml deleted file mode 100644 index 1d896c0fb..000000000 --- a/releasenotes/notes/az-info-9344b8d54c0b2665.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The bug where the availability zone info from a nova server deployment - was not available has been fixed.
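The availability_zone load-balancer note above maps onto the policy spec format used by the sample files earlier in this patch. A minimal sketch, assuming the property sits at the top level of a ``senlin.policy.loadbalance`` spec; the version number and property placement are assumptions, not taken from this patch::

    # Sketch only: availability_zone placement and the version number are
    # assumptions; vip.subnet is the usual required property.
    type: senlin.policy.loadbalance
    version: 1.3
    properties:
      vip:
        subnet: private-subnet
      availability_zone: az-1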
diff --git a/releasenotes/notes/batch-scheduling-ca5d98d41fc72973.yaml b/releasenotes/notes/batch-scheduling-ca5d98d41fc72973.yaml deleted file mode 100644 index 5fcf48999..000000000 --- a/releasenotes/notes/batch-scheduling-ca5d98d41fc72973.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Improved the action scheduler so that it can decide how many node - actions will be fired in each batch. Batch control is a throttling - measure to avoid raising too many requests in a short interval to - the backend services. diff --git a/releasenotes/notes/bdmv2-fix-b9ff742cdc282087.yaml b/releasenotes/notes/bdmv2-fix-b9ff742cdc282087.yaml deleted file mode 100644 index 76a4122da..000000000 --- a/releasenotes/notes/bdmv2-fix-b9ff742cdc282087.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The UUID used by the block_device_mapping_v2 in nova.server profile is - validated. diff --git a/releasenotes/notes/bug-1789488-75ee756a53722cd1.yaml b/releasenotes/notes/bug-1789488-75ee756a53722cd1.yaml deleted file mode 100644 index 0b236c76d..000000000 --- a/releasenotes/notes/bug-1789488-75ee756a53722cd1.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - [`bug 1789488 `_] - Perform deep validation of profile and policy schemas so that errors in - spec properties are detected. diff --git a/releasenotes/notes/bug-1811161-c6416ad27ab0a2ce.yaml b/releasenotes/notes/bug-1811161-c6416ad27ab0a2ce.yaml deleted file mode 100644 index b93ac9b5e..000000000 --- a/releasenotes/notes/bug-1811161-c6416ad27ab0a2ce.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - [`bug 1811161 `_] - Perform policy post-op even if action failed. This allows the health policy - to reenable health checks even if an action failed. diff --git a/releasenotes/notes/bug-1811294-262d4b9cced3f505.yaml b/releasenotes/notes/bug-1811294-262d4b9cced3f505.yaml deleted file mode 100644 index 855aef837..000000000 --- a/releasenotes/notes/bug-1811294-262d4b9cced3f505.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - [`bug 1811294 `_] - Set owner field for actions created to wait for lifecycle completion. This - allows these actions to be cleaned up when the engine is restarted. diff --git a/releasenotes/notes/bug-1813089-db57e7bdfd3983ac.yaml b/releasenotes/notes/bug-1813089-db57e7bdfd3983ac.yaml deleted file mode 100644 index dded63707..000000000 --- a/releasenotes/notes/bug-1813089-db57e7bdfd3983ac.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - [`bug 1813089 `_] - This change picks the address when adding a node to a load balancer based on - the subnet IP version. This fix adds support for nodes with - dual-stack networks. diff --git a/releasenotes/notes/bug-1815540-2664a975db5fafc8.yaml b/releasenotes/notes/bug-1815540-2664a975db5fafc8.yaml deleted file mode 100644 index 5b6d316af..000000000 --- a/releasenotes/notes/bug-1815540-2664a975db5fafc8.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - [`bug 1815540 `_] - Cluster recovery and node recovery API request bodies are changed to only accept a - single operation. Optional parameters for this operation are set in operation_params. diff --git a/releasenotes/notes/bug-1817379-23dd2c925259d5f2.yaml b/releasenotes/notes/bug-1817379-23dd2c925259d5f2.yaml deleted file mode 100644 index 21b4d2c1f..000000000 --- a/releasenotes/notes/bug-1817379-23dd2c925259d5f2.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - [`bug 1817379 `_] - Delete ports before recovering a node.
diff --git a/releasenotes/notes/bug-1817604-41d4b8f6c6f920e4.yaml b/releasenotes/notes/bug-1817604-41d4b8f6c6f920e4.yaml deleted file mode 100644 index 8a5089cc8..000000000 --- a/releasenotes/notes/bug-1817604-41d4b8f6c6f920e4.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -fixes: - - | - [`bug 1817604 `_] - Fixes major performance bugs within Senlin by improving database - interaction. This was completed by updating the database models to - properly take advantage of relationships. Additionally removes - unnecessary database calls and prefers joins instead to retrieve - object data. diff --git a/releasenotes/notes/bug-1828856-bf7a30a6eb00238a.yaml b/releasenotes/notes/bug-1828856-bf7a30a6eb00238a.yaml deleted file mode 100644 index d16c12710..000000000 --- a/releasenotes/notes/bug-1828856-bf7a30a6eb00238a.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -fixes: - - | - Fixes a bug where the webhook rejected additional parameters in the body for - microversions less than 1.10. Now, with the new webhook version 2, additional - parameters in the body will always be accepted regardless of the - API microversion passed in. -other: - - | - Introduces webhook version 2 that is returned when creating new webhook - receivers. Webhook version 1 receivers are still valid and will - continue to be accepted. diff --git a/releasenotes/notes/bug-2048099-74f0ca874cfbe6b4.yaml b/releasenotes/notes/bug-2048099-74f0ca874cfbe6b4.yaml deleted file mode 100644 index 75fea5cc7..000000000 --- a/releasenotes/notes/bug-2048099-74f0ca874cfbe6b4.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Addresses an issue related to the SCALE_IN action. The bug caused - the removal of only one node from the load balancer even when the - count of inputs was greater than 1. diff --git a/releasenotes/notes/bug-2048100-6b4156df956a6f14.yaml b/releasenotes/notes/bug-2048100-6b4156df956a6f14.yaml deleted file mode 100644 index ee6043c90..000000000 --- a/releasenotes/notes/bug-2048100-6b4156df956a6f14.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fix incorrect handling of actions causing node reduction in a cluster - and load balancer with desired_capacity = min_size. The node remains in - the cluster, and its IP is no longer removed from the load balancer. diff --git a/releasenotes/notes/bug-2048452-8a690353815601a0.yaml b/releasenotes/notes/bug-2048452-8a690353815601a0.yaml deleted file mode 100644 index f663cdb9a..000000000 --- a/releasenotes/notes/bug-2048452-8a690353815601a0.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -fixes: - - | - [`bug 2048452 `_] - Fixed a bug where `TrustMiddleware` was unable to fetch trusts/credentials - from the Identity service; this may be related to: - https://bugs.launchpad.net/keystone/+bug/1959674 - This bug is fixed by using the `admin_token` instead of the `token` auth method - to fetch trusts/credentials from the Identity service. diff --git a/releasenotes/notes/bug-2048726-a830a7838661a41f.yaml b/releasenotes/notes/bug-2048726-a830a7838661a41f.yaml deleted file mode 100644 index e5165ba2b..000000000 --- a/releasenotes/notes/bug-2048726-a830a7838661a41f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fixed a bug where an exception raised in the `_resolve_bdm` method of the - `senlin.profile.os.nova.server` profile would prevent the cluster from creating - new nodes.
diff --git a/releasenotes/notes/bug-2049191-8ee2d8352b05cfef.yaml b/releasenotes/notes/bug-2049191-8ee2d8352b05cfef.yaml deleted file mode 100644 index 5895abf8c..000000000 --- a/releasenotes/notes/bug-2049191-8ee2d8352b05cfef.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - CLUSTER_RESIZE is now excluded from the list of actions that skip pre-op checks - when the cluster is already at the minimum threshold. When the cluster is at - the minimum threshold, pre-operation LB will function with actions such as - CLUSTER_DEL_NODES, CLUSTER_SCALE_IN, NODE_DELETE, and will skip actions - like CLUSTER_REPLACE_NODES and CLUSTER_RESIZE. diff --git a/releasenotes/notes/capacity-calculation-4fd389ff12107dfb.yaml b/releasenotes/notes/capacity-calculation-4fd389ff12107dfb.yaml deleted file mode 100644 index 0785600ec..000000000 --- a/releasenotes/notes/capacity-calculation-4fd389ff12107dfb.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - Fixed a bug related to the desired_capacity calculation. The base number used - now is the current capacity of the cluster instead of the previous 'desired' - capacity. This includes all actions that change cluster capacity and all - related policies. diff --git a/releasenotes/notes/clean-actions-for-cluster-node-438ca5268e7fd258.yaml b/releasenotes/notes/clean-actions-for-cluster-node-438ca5268e7fd258.yaml deleted file mode 100644 index 4f2495359..000000000 --- a/releasenotes/notes/clean-actions-for-cluster-node-438ca5268e7fd258.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - When a cluster or a node is deleted, the action records associated with - them are now automatically deleted from the database. diff --git a/releasenotes/notes/cluster-action-refresh-9eeb60f1f2c1d0abr.yaml b/releasenotes/notes/cluster-action-refresh-9eeb60f1f2c1d0abr.yaml deleted file mode 100644 index 6391e7c1a..000000000 --- a/releasenotes/notes/cluster-action-refresh-9eeb60f1f2c1d0abr.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added a cluster entity refresh to the cluster action execute wrapper which will make sure the - state of the action does not become stale while in the queue. diff --git a/releasenotes/notes/cluster-check-interval-b01e8140cc83760e.yaml b/releasenotes/notes/cluster-check-interval-b01e8140cc83760e.yaml deleted file mode 100644 index 1f471048c..000000000 --- a/releasenotes/notes/cluster-check-interval-b01e8140cc83760e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - A new configuration option check_interval_max is added (default=3600) for - cluster health check intervals. diff --git a/releasenotes/notes/cluster-collect-90e460c7bfede347.yaml b/releasenotes/notes/cluster-collect-90e460c7bfede347.yaml deleted file mode 100644 index bbfa23cb7..000000000 --- a/releasenotes/notes/cluster-collect-90e460c7bfede347.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - A new ``cluster_collect`` API is added. diff --git a/releasenotes/notes/cluster-delete-conflict-94261706eb29e9bb.yaml b/releasenotes/notes/cluster-delete-conflict-94261706eb29e9bb.yaml deleted file mode 100644 index aa977743b..000000000 --- a/releasenotes/notes/cluster-delete-conflict-94261706eb29e9bb.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The cluster delete API calls may return a 409 status code if there - are policies and/or receivers associated with it. Previously, a 400 - status code was returned.
diff --git a/releasenotes/notes/cluster-delete-with-policy-d2dca161e42ee6ba.yaml b/releasenotes/notes/cluster-delete-with-policy-d2dca161e42ee6ba.yaml deleted file mode 100644 index 918710022..000000000 --- a/releasenotes/notes/cluster-delete-with-policy-d2dca161e42ee6ba.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -prelude: > - Updated tests to work with updated cluster delete. -features: - - | - Allows the cluster delete actions to detach policies and delete receivers - for the cluster being deleted. This simplifies deleting clusters by not - having to detach or delete all dependencies from it beforehand. diff --git a/releasenotes/notes/cluster-desired-capacity-d876347f69b04b4f.yaml b/releasenotes/notes/cluster-desired-capacity-d876347f69b04b4f.yaml deleted file mode 100644 index 8bda99aa6..000000000 --- a/releasenotes/notes/cluster-desired-capacity-d876347f69b04b4f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - The 'desired_capacity' reflects the expectation from a requester's view - point. The engine now changes the 'desired_capacity' after the request - is validated/sanitized, before the action is actually implemented. This - means the 'desired_capacity' will change even if an action fails. diff --git a/releasenotes/notes/cluster-lock-e283fb9bf1002bca.yaml b/releasenotes/notes/cluster-lock-e283fb9bf1002bca.yaml deleted file mode 100644 index d2276eeef..000000000 --- a/releasenotes/notes/cluster-lock-e283fb9bf1002bca.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed cluster lock primary key conflict problem. diff --git a/releasenotes/notes/cluster-node-dependents-3bdbebd773d276d1.yaml b/releasenotes/notes/cluster-node-dependents-3bdbebd773d276d1.yaml deleted file mode 100644 index a82188a3f..000000000 --- a/releasenotes/notes/cluster-node-dependents-3bdbebd773d276d1.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added dependents to clusters and nodes for recording other clusters/nodes - that depend on them. diff --git a/releasenotes/notes/cluster-node-status-e7fced162b415452.yaml b/releasenotes/notes/cluster-node-status-e7fced162b415452.yaml deleted file mode 100644 index c0505892b..000000000 --- a/releasenotes/notes/cluster-node-status-e7fced162b415452.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed cluster/node status setting after a cluster/node check operation. diff --git a/releasenotes/notes/cluster-ops-433a5aa608a0eb7f.yaml b/releasenotes/notes/cluster-ops-433a5aa608a0eb7f.yaml deleted file mode 100644 index 7c25e9509..000000000 --- a/releasenotes/notes/cluster-ops-433a5aa608a0eb7f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - A new API "cluster-op" is introduced to trigger a profile type specific - operation on all nodes in a cluster. This API is available since API - micro-version 1.4. diff --git a/releasenotes/notes/cluster-recover-d87d429873b376db.yaml b/releasenotes/notes/cluster-recover-d87d429873b376db.yaml deleted file mode 100644 index 4c8ed58c5..000000000 --- a/releasenotes/notes/cluster-recover-d87d429873b376db.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed cluster-recover operation in the engine so that it accepts parameters - from API requests in addition to policy decisions (if any).
diff --git a/releasenotes/notes/cluster-resize-fix-bee18840a98907d8.yaml b/releasenotes/notes/cluster-resize-fix-bee18840a98907d8.yaml deleted file mode 100644 index 36666766d..000000000 --- a/releasenotes/notes/cluster-resize-fix-bee18840a98907d8.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed a bug related to an oslo.versionedobjects change that prevented cluster - actions from being properly encoded in JSON requests. diff --git a/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml b/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml deleted file mode 100644 index dbed938d5..000000000 --- a/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -prelude: > - This release alters the cluster_scale_in and cluster_scale_out actions to - no longer place the action into the actions table when a conflict is - detected. This behavior is an improvement on the old way actions are - processed as the requester will now receive immediate feedback from the - API when an action cannot be processed. This release also honors the - scaling action cooldown in the same manner by erring via the API when a - scaling action cannot be processed due to cooldown. -features: - - | - [`blueprint scaling-action-acceptance `_] - Scaling actions (IN or OUT) now validate that there is no conflicting - action already being processed and will return an error via the API - informing the end user if a conflict is detected. A conflicting action is - detected when a new action of either `CLUSTER_SCALE_IN` or - `CLUSTER_SCALE_OUT` is attempted while there is already a cluster scaling - action in the action table in a pending status (READY, RUNNING, WAITING, - ACTION_WAITING_LIFECYCLE_COMPLETION). - Additionally, the cooldown will be checked and enforced when a scaling - action is requested. If the cooldown is being observed, the requester will - be informed via an error when submitting the action. diff --git a/releasenotes/notes/cluster-status-update-dd9133092aef05ab.yaml b/releasenotes/notes/cluster-status-update-dd9133092aef05ab.yaml deleted file mode 100644 index 4344eee01..000000000 --- a/releasenotes/notes/cluster-status-update-dd9133092aef05ab.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - Fixed cluster status update logic so that cluster status is solely - determined by the status of its member nodes. The status is updated - each time a cluster operation has completed. diff --git a/releasenotes/notes/compute-instance-fencing-63b931cdf35b127c.yaml b/releasenotes/notes/compute-instance-fencing-63b931cdf35b127c.yaml deleted file mode 100644 index 26926c104..000000000 --- a/releasenotes/notes/compute-instance-fencing-63b931cdf35b127c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The senlin-engine now supports fencing a corrupted VM instance by deleting - it forcibly. diff --git a/releasenotes/notes/config-default-nova-timeout-f0bd73811ac3a8bb.yaml b/releasenotes/notes/config-default-nova-timeout-f0bd73811ac3a8bb.yaml deleted file mode 100644 index 66846c32a..000000000 --- a/releasenotes/notes/config-default-nova-timeout-f0bd73811ac3a8bb.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added a new config option to specify the timeout for Nova API calls.
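The scaling-action-conflict note above refers to the cooldown defined in a scaling policy spec. A minimal sketch of such a spec, following the sample-file format used earlier in this patch (all values illustrative)::

    type: senlin.policy.scaling
    version: 1.0
    properties:
      event: CLUSTER_SCALE_IN
      adjustment:
        type: CHANGE_IN_CAPACITY
        number: 1
        # While this cooldown is in effect, further scaling requests are
        # rejected at the API with an error, per the note above.
        cooldown: 120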
\ No newline at end of file diff --git a/releasenotes/notes/config-doc-cb8b37e360422301.yaml b/releasenotes/notes/config-doc-cb8b37e360422301.yaml deleted file mode 100644 index 562a703cc..000000000 --- a/releasenotes/notes/config-doc-cb8b37e360422301.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - Senlin API/Engine configuration options are now documented and published - online. diff --git a/releasenotes/notes/config-scheduler-thread-pool-size-de608624a6cb4b43r.yaml b/releasenotes/notes/config-scheduler-thread-pool-size-de608624a6cb4b43r.yaml deleted file mode 100644 index 25cec4ec9..000000000 --- a/releasenotes/notes/config-scheduler-thread-pool-size-de608624a6cb4b43r.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added a config option for the scheduler thread pool size. diff --git a/releasenotes/notes/config-stop-node-before-delete-4ab08e61b40e4474.yaml b/releasenotes/notes/config-stop-node-before-delete-4ab08e61b40e4474.yaml deleted file mode 100644 index 0fd39bfad..000000000 --- a/releasenotes/notes/config-stop-node-before-delete-4ab08e61b40e4474.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added a new boolean cluster config option to stop nodes before deletion for all clusters. diff --git a/releasenotes/notes/config-trust-roles-416e26e03036ae40.yaml b/releasenotes/notes/config-trust-roles-416e26e03036ae40.yaml deleted file mode 100644 index ee8e941b0..000000000 --- a/releasenotes/notes/config-trust-roles-416e26e03036ae40.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added a new list config option to allow trust roles to be overridden. - diff --git a/releasenotes/notes/container-ops-e57d096742202206.yaml b/releasenotes/notes/container-ops-e57d096742202206.yaml deleted file mode 100644 index e05d3f791..000000000 --- a/releasenotes/notes/container-ops-e57d096742202206.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Docker container profile now supports operations like restart, pause and - unpause. diff --git a/releasenotes/notes/container-profile-152bf2908c70ffad.yaml b/releasenotes/notes/container-profile-152bf2908c70ffad.yaml deleted file mode 100644 index e9469ba95..000000000 --- a/releasenotes/notes/container-profile-152bf2908c70ffad.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - A new profile type 'container.dockerinc.docker-1.0' is added to support - creation and management of docker clusters. This is still an experimental - feature. Please use with caution. diff --git a/releasenotes/notes/db-action-retries-d471fe85b4510afd.yaml b/releasenotes/notes/db-action-retries-d471fe85b4510afd.yaml deleted file mode 100644 index 9a64b6914..000000000 --- a/releasenotes/notes/db-action-retries-d471fe85b4510afd.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - | - DB layer operations now feature some retries if there are transient errors. diff --git a/releasenotes/notes/db-ignore-project_safe-for-admins-2986f15e74cd1d1c.yaml b/releasenotes/notes/db-ignore-project_safe-for-admins-2986f15e74cd1d1c.yaml deleted file mode 100644 index 5442a192d..000000000 --- a/releasenotes/notes/db-ignore-project_safe-for-admins-2986f15e74cd1d1c.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -features: - - | - Admin role users can now access and modify all resources (clusters, nodes, - etc) regardless of which project they belong to. -security: - - | - Removed the restriction for admin role users that prevented access/changes - to resources (clusters, nodes, etc) belonging to projects not matching the - project used for authentication.
Access for non-admin users is still - isolated to the project used for authentication. diff --git a/releasenotes/notes/db-locking-logic-9c97b04ce8c52989.yaml b/releasenotes/notes/db-locking-logic-9c97b04ce8c52989.yaml deleted file mode 100644 index 729204ebe..000000000 --- a/releasenotes/notes/db-locking-logic-9c97b04ce8c52989.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixed db locking logic to avoid deadlock. \ No newline at end of file diff --git a/releasenotes/notes/db-retries-da4a0d9d83ad56bb.yaml b/releasenotes/notes/db-retries-da4a0d9d83ad56bb.yaml deleted file mode 100644 index 0090c2a57..000000000 --- a/releasenotes/notes/db-retries-da4a0d9d83ad56bb.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - All REST calls that involve a DB interaction are now automatically retried - upon deadlock exceptions. diff --git a/releasenotes/notes/delete-batch-a16ee5ed2512eab7.yaml b/releasenotes/notes/delete-batch-a16ee5ed2512eab7.yaml deleted file mode 100644 index eba0eb465..000000000 --- a/releasenotes/notes/delete-batch-a16ee5ed2512eab7.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - | - The support for the CLUSTER_DELETE action from the experimental batch policy - is dropped due to issues with cluster locking. This could be resurrected - in the future when a proper workaround is identified. diff --git a/releasenotes/notes/delete_with_dependants-823c6c4921f22575.yaml b/releasenotes/notes/delete_with_dependants-823c6c4921f22575.yaml deleted file mode 100644 index 6f8f2729d..000000000 --- a/releasenotes/notes/delete_with_dependants-823c6c4921f22575.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Allow the cluster delete action to detach policies and delete receivers instead - of erroring. diff --git a/releasenotes/notes/deletion-policy-11bcb7c0e90bbfcc.yaml b/releasenotes/notes/deletion-policy-11bcb7c0e90bbfcc.yaml deleted file mode 100644 index b2a2addc7..000000000 --- a/releasenotes/notes/deletion-policy-11bcb7c0e90bbfcc.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed an error in the built-in deletion policy which failed to process - NODE_DELETE action. diff --git a/releasenotes/notes/deletion-policy-node-delete-dc70da377b2a4f77.yaml b/releasenotes/notes/deletion-policy-node-delete-dc70da377b2a4f77.yaml deleted file mode 100644 index 574fc7b38..000000000 --- a/releasenotes/notes/deletion-policy-node-delete-dc70da377b2a4f77.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The deletion policy is enhanced to handle 'NODE_DELETE' actions which - derive from a standalone 'node_delete' request. diff --git a/releasenotes/notes/deprecate-json-formatted-policy-file-0c29555b3ea0c984.yaml b/releasenotes/notes/deprecate-json-formatted-policy-file-0c29555b3ea0c984.yaml deleted file mode 100644 index c9c530004..000000000 --- a/releasenotes/notes/deprecate-json-formatted-policy-file-0c29555b3ea0c984.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -upgrade: - - | - The default value of ``[oslo_policy] policy_file`` config option has - been changed from ``policy.json`` to ``policy.yaml``. - Operators who are utilizing customized or previously generated - static policy JSON files (which are not needed by default) should - generate new policy files or convert them to YAML format. Use the - `oslopolicy-convert-json-to-yaml - `_ - tool to convert a JSON to YAML formatted policy file in - a backward compatible way. -deprecations: - - | - Use of JSON policy files was deprecated by the ``oslo.policy`` library - during the Victoria development cycle.
As a result, this deprecation is - being noted in the Wallaby cycle with an anticipated future removal of support - by ``oslo.policy``. As such, operators will need to convert to YAML policy - files. Please see the upgrade notes for details on migration of any - custom policy files. diff --git a/releasenotes/notes/destroy-nodes-after-remove-37bffdc35a9b7a96.yaml b/releasenotes/notes/destroy-nodes-after-remove-37bffdc35a9b7a96.yaml deleted file mode 100644 index e86fdbb98..000000000 --- a/releasenotes/notes/destroy-nodes-after-remove-37bffdc35a9b7a96.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new, optional parameter "destroy_after_deletion" is added to the - cluster-del-nodes request since API micro-version 1.4. diff --git a/releasenotes/notes/doc-fixes-0783e8120b61299br.yaml b/releasenotes/notes/doc-fixes-0783e8120b61299br.yaml deleted file mode 100644 index ef88c65fc..000000000 --- a/releasenotes/notes/doc-fixes-0783e8120b61299br.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed the example of the "aodh alarm create" command. diff --git a/releasenotes/notes/doc-fixes-5057bf93464810cc.yaml b/releasenotes/notes/doc-fixes-5057bf93464810cc.yaml deleted file mode 100644 index 4ba8a350a..000000000 --- a/releasenotes/notes/doc-fixes-5057bf93464810cc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -fixes: - - | - Various fixes to the user doc, developer doc and API documentation. - Fixed api-ref and docs building. - Fixed keystone_authtoken config in docs. - Updated docs and examples for health policy v1.1. - Updated api-ref location. - Updated Cirros Example file. - diff --git a/releasenotes/notes/doc-fixes-685c64d1ef509041.yaml b/releasenotes/notes/doc-fixes-685c64d1ef509041.yaml deleted file mode 100644 index 09967462c..000000000 --- a/releasenotes/notes/doc-fixes-685c64d1ef509041.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Senlin API/functional/integration tests were moved to the senlin-tempest-plugin project earlier; - the docs are fixed to reflect this change. diff --git a/releasenotes/notes/doc-fixes-cd8c7006f8c66387.yaml b/releasenotes/notes/doc-fixes-cd8c7006f8c66387.yaml deleted file mode 100644 index 2ae931e40..000000000 --- a/releasenotes/notes/doc-fixes-cd8c7006f8c66387.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Various fixes to the user doc, developer doc and API documentation. diff --git a/releasenotes/notes/doc-fixes-e60bb1a486f67e0c.yaml b/releasenotes/notes/doc-fixes-e60bb1a486f67e0c.yaml deleted file mode 100644 index 5124344d7..000000000 --- a/releasenotes/notes/doc-fixes-e60bb1a486f67e0c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Various bug fixes to the user manual and sample profiles/policies. diff --git a/releasenotes/notes/docker-reboot-999ec624186864e3.yaml b/releasenotes/notes/docker-reboot-999ec624186864e3.yaml deleted file mode 100644 index 4f678f5ca..000000000 --- a/releasenotes/notes/docker-reboot-999ec624186864e3.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixed an error when restarting a docker container node. diff --git a/releasenotes/notes/docker-start-c850c256c6149f4f.yaml b/releasenotes/notes/docker-start-c850c256c6149f4f.yaml deleted file mode 100644 index 2453dc4e4..000000000 --- a/releasenotes/notes/docker-start-c850c256c6149f4f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added operation support to start a docker container.
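For the JSON-to-YAML policy-file deprecation note above, a converted ``policy.yaml`` entry would look roughly like the sketch below; the rule names and values are illustrative assumptions, not copied from Senlin's defaults::

    # Hypothetical output of oslopolicy-convert-json-to-yaml; the rule
    # strings shown are illustrative assumptions.
    "clusters:index": "project_id:%(project_id)s"
    "clusters:create": "project_id:%(project_id)s"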
diff --git a/releasenotes/notes/docker-update-1b465241ca78873c.yaml b/releasenotes/notes/docker-update-1b465241ca78873c.yaml deleted file mode 100644 index 16947e922..000000000 --- a/releasenotes/notes/docker-update-1b465241ca78873c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Supported the name update operation for the docker profile. diff --git a/releasenotes/notes/drop-py-2-7-154eeefdc9886091.yaml b/releasenotes/notes/drop-py-2-7-154eeefdc9886091.yaml deleted file mode 100644 index e63550121..000000000 --- a/releasenotes/notes/drop-py-2-7-154eeefdc9886091.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -upgrade: - - | - Python 2.7 support has been dropped. The last release of Senlin - to support python 2.7 is OpenStack Train. The minimum version of Python now - supported by Senlin is Python 3.6. diff --git a/releasenotes/notes/drop-py34-support-21e20efb9bf0b326.yaml b/releasenotes/notes/drop-py34-support-21e20efb9bf0b326.yaml deleted file mode 100644 index e5c87438a..000000000 --- a/releasenotes/notes/drop-py34-support-21e20efb9bf0b326.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - | - Support for py3.4 is dropped. Please use py3.5 instead. diff --git a/releasenotes/notes/drop-python-3-6-and-3-7-3a90d172a5e43660.yaml b/releasenotes/notes/drop-python-3-6-and-3-7-3a90d172a5e43660.yaml deleted file mode 100644 index 3d5e4e3d3..000000000 --- a/releasenotes/notes/drop-python-3-6-and-3-7-3a90d172a5e43660.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - | - Python 3.6 & 3.7 support has been dropped. The minimum version of Python now - supported is Python 3.8. \ No newline at end of file diff --git a/releasenotes/notes/dynamic-timer-67f053499f4b32e2.yaml b/releasenotes/notes/dynamic-timer-67f053499f4b32e2.yaml deleted file mode 100644 index be25162aa..000000000 --- a/releasenotes/notes/dynamic-timer-67f053499f4b32e2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The health manager is improved to use dynamic timers instead of fixed - interval timers when polling a cluster's status. diff --git a/releasenotes/notes/enforce-multi-tenancy-ee27b9bfec7ba405.yaml b/releasenotes/notes/enforce-multi-tenancy-ee27b9bfec7ba405.yaml deleted file mode 100644 index 00320ada1..000000000 --- a/releasenotes/notes/enforce-multi-tenancy-ee27b9bfec7ba405.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -security: - - Multi-tenancy is enhanced so that an admin role user has to respect - project isolation unless explicitly asking for an exception. diff --git a/releasenotes/notes/error-messages-bd8b5a6d12e2c4af.yaml b/releasenotes/notes/error-messages-bd8b5a6d12e2c4af.yaml deleted file mode 100644 index 51951fc6a..000000000 --- a/releasenotes/notes/error-messages-bd8b5a6d12e2c4af.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Error messages returned from API requests are now unified. All parameter - validation failures of the same reason return a similar message. diff --git a/releasenotes/notes/event-for-derived-actions-8bd44367fa683dbc.yaml b/releasenotes/notes/event-for-derived-actions-8bd44367fa683dbc.yaml deleted file mode 100644 index 9c16210f5..000000000 --- a/releasenotes/notes/event-for-derived-actions-8bd44367fa683dbc.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - A configuration option "exclude_derived_actions" is introduced into the - "dispatchers" group for controlling whether derived actions should lead - to event notifications and/or DB records.
diff --git a/releasenotes/notes/event-list-b268bb778efa9ee1.yaml b/releasenotes/notes/event-list-b268bb778efa9ee1.yaml deleted file mode 100644 index bd472df45..000000000 --- a/releasenotes/notes/event-list-b268bb778efa9ee1.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - New logic was added to the event-list operation so that users can specify the - name or short-id of a cluster for filtering. diff --git a/releasenotes/notes/event-notification-eda06b43ce17a081.yaml b/releasenotes/notes/event-notification-eda06b43ce17a081.yaml deleted file mode 100644 index c76fb511d..000000000 --- a/releasenotes/notes/event-notification-eda06b43ce17a081.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The engine has been augmented to send event notifications only when a node - is active and it has a physical ID associated. This targets the - lifecycle hooks and possibly other notifications. diff --git a/releasenotes/notes/event-purge-db868a063e18eafb.yaml b/releasenotes/notes/event-purge-db868a063e18eafb.yaml deleted file mode 100644 index fd2de9ff8..000000000 --- a/releasenotes/notes/event-purge-db868a063e18eafb.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - An event_purge subcommand is added to the senlin-manage tool for purging events - generated in a specific project. diff --git a/releasenotes/notes/event-table-change-dcb42c8b6d145fec.yaml b/releasenotes/notes/event-table-change-dcb42c8b6d145fec.yaml deleted file mode 100644 index 1b1f0f5d0..000000000 --- a/releasenotes/notes/event-table-change-dcb42c8b6d145fec.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - DB columns obj_id, obj_type and obj_name in the event table are now - renamed to oid, otype and oname respectively. diff --git a/releasenotes/notes/fail-fast-on-locked-resource-eee28572dc40009a.yaml b/releasenotes/notes/fail-fast-on-locked-resource-eee28572dc40009a.yaml deleted file mode 100644 index a8d100553..000000000 --- a/releasenotes/notes/fail-fast-on-locked-resource-eee28572dc40009a.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -prelude: > - This release alters the behavior of cluster and node APIs which create, update or - delete either resource. In the previous release those API calls would be - accepted even if the target resource was already locked by another action. - The old implementation would wait until the other action released the lock - and then continue to execute the desired action. With the new implementation - any API calls for a cluster or node that modify said resource will be rejected - with 409 conflict. -features: - - | - [`blueprint fail-fast-locked-resource `_] - POST, PATCH or DELETE API calls for clusters or nodes that require a lock are - rejected with 409 resource conflict if another action is already holding a - lock on the target resource. diff --git a/releasenotes/notes/fix-action-triggering-e880b02234028315.yaml b/releasenotes/notes/fix-action-triggering-e880b02234028315.yaml deleted file mode 100644 index 2157fa84c..000000000 --- a/releasenotes/notes/fix-action-triggering-e880b02234028315.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - When an action is marked as RETRY, its status is reset to READY for a - reschedule. A bug related to this behavior is now fixed.
diff --git a/releasenotes/notes/fix-aodh-integration-41e69276158ad233.yaml b/releasenotes/notes/fix-aodh-integration-41e69276158ad233.yaml deleted file mode 100644 index a45d4e611..000000000 --- a/releasenotes/notes/fix-aodh-integration-41e69276158ad233.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - | - The API microversion 1.10 has fixed the webhook trigger API for easier - integration with Aodh. In previous microversions, the query parameters - are used as action inputs. Starting from 1.10, the key-value pairs in the - request body are also considered as request inputs. diff --git a/releasenotes/notes/fix-cluster-index-ae0060b6337d6d55.yaml b/releasenotes/notes/fix-cluster-index-ae0060b6337d6d55.yaml deleted file mode 100644 index b55b4c06e..000000000 --- a/releasenotes/notes/fix-cluster-index-ae0060b6337d6d55.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fix cluster next_index update when adding nodes to a cluster. diff --git a/releasenotes/notes/fix-cooldown-5082711989ecd536.yaml b/releasenotes/notes/fix-cooldown-5082711989ecd536.yaml deleted file mode 100644 index e1ac5ab9c..000000000 --- a/releasenotes/notes/fix-cooldown-5082711989ecd536.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixed premature return from the policy cooldown check. diff --git a/releasenotes/notes/fix-db-deadlock-1d2bdb9ce785734a.yaml b/releasenotes/notes/fix-db-deadlock-1d2bdb9ce785734a.yaml deleted file mode 100644 index 00257a2e8..000000000 --- a/releasenotes/notes/fix-db-deadlock-1d2bdb9ce785734a.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed DB layer deadlock issue that surfaced recently during concurrent - DB operations. diff --git a/releasenotes/notes/fix-delete-apis-bf9f47b5fcf8f3e6.yaml b/releasenotes/notes/fix-delete-apis-bf9f47b5fcf8f3e6.yaml deleted file mode 100644 index ab830c56b..000000000 --- a/releasenotes/notes/fix-delete-apis-bf9f47b5fcf8f3e6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed resource delete operations which should return 204 status code with - body length of zero. diff --git a/releasenotes/notes/fix-delete-node-error-31575d62bc9375ec.yaml b/releasenotes/notes/fix-delete-node-error-31575d62bc9375ec.yaml deleted file mode 100644 index a9e664eb5..000000000 --- a/releasenotes/notes/fix-delete-node-error-31575d62bc9375ec.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed a bug in handling node deletion errors. diff --git a/releasenotes/notes/fix-desired-when-omitted-e7ffc0aa72ab8cc9.yaml b/releasenotes/notes/fix-desired-when-omitted-e7ffc0aa72ab8cc9.yaml deleted file mode 100644 index acc5bc382..000000000 --- a/releasenotes/notes/fix-desired-when-omitted-e7ffc0aa72ab8cc9.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fixed a bug related to desired_capacity when creating a cluster. The old - behavior was to have it default to 1; however, the correct behavior is - to have it default to min_size if provided. diff --git a/releasenotes/notes/fix-dup-of-action-dump-0b95a07adf3ccdba.yaml b/releasenotes/notes/fix-dup-of-action-dump-0b95a07adf3ccdba.yaml deleted file mode 100644 index e9b264da0..000000000 --- a/releasenotes/notes/fix-dup-of-action-dump-0b95a07adf3ccdba.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixed a problem related to duplicated event dumps during action execution.
diff --git a/releasenotes/notes/fix-health-check-5d77795885676661.yaml b/releasenotes/notes/fix-health-check-5d77795885676661.yaml deleted file mode 100644 index 05d78d803..000000000 --- a/releasenotes/notes/fix-health-check-5d77795885676661.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed a bug in health checking which was introduced by oslo.context changes. diff --git a/releasenotes/notes/fix-health-cluster-check-5ce1c0309c03c5d5.yaml b/releasenotes/notes/fix-health-cluster-check-5ce1c0309c03c5d5.yaml deleted file mode 100644 index f66d2c5e7..000000000 --- a/releasenotes/notes/fix-health-cluster-check-5ce1c0309c03c5d5.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed an issue so that when a cluster resize/scale creates nodes and the physical - IDs of those nodes are not found, the cluster can still perform health checks. diff --git a/releasenotes/notes/fix-health-mgr-opts-99898614f37c5d74.yaml b/releasenotes/notes/fix-health-mgr-opts-99898614f37c5d74.yaml deleted file mode 100644 index 70cc25483..000000000 --- a/releasenotes/notes/fix-health-mgr-opts-99898614f37c5d74.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed the problem that health manager related configuration options were - not properly exposed. diff --git a/releasenotes/notes/fix-health-policy-bind-9b6ed0e51939eac3.yaml b/releasenotes/notes/fix-health-policy-bind-9b6ed0e51939eac3.yaml deleted file mode 100644 index 16fa17011..000000000 --- a/releasenotes/notes/fix-health-policy-bind-9b6ed0e51939eac3.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed a bug when checking whether a health policy is already attached. diff --git a/releasenotes/notes/fix-network-error-handling-e78da90b6bc2319c.yaml b/releasenotes/notes/fix-network-error-handling-e78da90b6bc2319c.yaml deleted file mode 100644 index 34a74d349..000000000 --- a/releasenotes/notes/fix-network-error-handling-e78da90b6bc2319c.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed error handling when network is not found in nova server creation. diff --git a/releasenotes/notes/fix-node-get-detail-4e6d30c3a6b2ce60.yaml b/releasenotes/notes/fix-node-get-detail-4e6d30c3a6b2ce60.yaml deleted file mode 100644 index cd81b4a18..000000000 --- a/releasenotes/notes/fix-node-get-detail-4e6d30c3a6b2ce60.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed node detail retrieval when VM creation fails. - diff --git a/releasenotes/notes/fix-node-leak-9b1c08342a52542d.yaml b/releasenotes/notes/fix-node-leak-9b1c08342a52542d.yaml deleted file mode 100644 index 222c18dd7..000000000 --- a/releasenotes/notes/fix-node-leak-9b1c08342a52542d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed a node leak when node creation fails. - diff --git a/releasenotes/notes/fix-node-recover-5af129bf0688577d.yaml b/releasenotes/notes/fix-node-recover-5af129bf0688577d.yaml deleted file mode 100644 index e4883f2d5..000000000 --- a/releasenotes/notes/fix-node-recover-5af129bf0688577d.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed node recover operation behavior so that unsupported operations can - be detected and handled.
diff --git a/releasenotes/notes/fix-node-status-for-lb-fc7714da09bec2fb.yaml b/releasenotes/notes/fix-node-status-for-lb-fc7714da09bec2fb.yaml deleted file mode 100644 index c5574968f..000000000 --- a/releasenotes/notes/fix-node-status-for-lb-fc7714da09bec2fb.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - When a node cannot be added to a load-balancer as desired, or it cannot - be removed from a load-balancer when requested, the node will be - marked as being in WARNING status. diff --git a/releasenotes/notes/fix-openstacksdk-exception-b762e649bfab4b31r.yaml b/releasenotes/notes/fix-openstacksdk-exception-b762e649bfab4b31r.yaml deleted file mode 100644 index a43143157..000000000 --- a/releasenotes/notes/fix-openstacksdk-exception-b762e649bfab4b31r.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - In the openstacksdk 0.14.0 release, a bug related to SDK exceptions was fixed - "https://review.openstack.org/#/c/571101/". With that change, an SDK exception will contain the - detailed message only if the message string is equal to 'Error'. Fixed the - test_parse_exception_http_exception_no_details test to use 'Error' as the exception message to make - the test case pass. diff --git a/releasenotes/notes/fix-policy-type-version-939a1fb4e84908f9.yaml b/releasenotes/notes/fix-policy-type-version-939a1fb4e84908f9.yaml deleted file mode 100644 index 1dc7acc05..000000000 --- a/releasenotes/notes/fix-policy-type-version-939a1fb4e84908f9.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -issues: - - There are cases where the event listener based health management cannot - successfully stop all listeners. -fixes: - - Enable old versions of builtin policy types to be listed and used. diff --git a/releasenotes/notes/fix-port-id-parameter-de4679438a891a67r.yaml b/releasenotes/notes/fix-port-id-parameter-de4679438a891a67r.yaml deleted file mode 100644 index 1779cd124..000000000 --- a/releasenotes/notes/fix-port-id-parameter-de4679438a891a67r.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Pass in the correct port ID parameter when calling interface create on a server. diff --git a/releasenotes/notes/fix-recover-trigger-749600f500f7bf4a.yaml b/releasenotes/notes/fix-recover-trigger-749600f500f7bf4a.yaml deleted file mode 100644 index f6a0718ba..000000000 --- a/releasenotes/notes/fix-recover-trigger-749600f500f7bf4a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed an error in the return value of node-check which prevents node-recover - from being triggered. diff --git a/releasenotes/notes/fix-registry-claim-5421dca1ed9b0783.yaml b/releasenotes/notes/fix-registry-claim-5421dca1ed9b0783.yaml deleted file mode 100644 index c72613c3a..000000000 --- a/releasenotes/notes/fix-registry-claim-5421dca1ed9b0783.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed a problem when claiming a cluster from health registry if service - engine is stopped (killed) and restarted quickly. diff --git a/releasenotes/notes/fix-security-group-with-same-name-887487416f4525a1.yaml b/releasenotes/notes/fix-security-group-with-same-name-887487416f4525a1.yaml deleted file mode 100644 index d10ea46bb..000000000 --- a/releasenotes/notes/fix-security-group-with-same-name-887487416f4525a1.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Find security group profiles by project scope.
diff --git a/releasenotes/notes/fix-tag-for-stacks-2ef70be061e80253.yaml b/releasenotes/notes/fix-tag-for-stacks-2ef70be061e80253.yaml deleted file mode 100644 index 074e5b52f..000000000 --- a/releasenotes/notes/fix-tag-for-stacks-2ef70be061e80253.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed an error in updating stack tags when the stack joins or leaves a - cluster. diff --git a/releasenotes/notes/fix-tox-cover-9fc01b5e0594aa19r.yaml b/releasenotes/notes/fix-tox-cover-9fc01b5e0594aa19r.yaml deleted file mode 100644 index bbd6723c5..000000000 --- a/releasenotes/notes/fix-tox-cover-9fc01b5e0594aa19r.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixed openstack-tox-cover which was broken as part of the switch to stestr. diff --git a/releasenotes/notes/fix-update-lb-policy-0af6e8866f3b5543.yaml b/releasenotes/notes/fix-update-lb-policy-0af6e8866f3b5543.yaml deleted file mode 100644 index f35e71de6..000000000 --- a/releasenotes/notes/fix-update-lb-policy-0af6e8866f3b5543.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - | - Updates should still be allowed in a DEGRADED state lest the LB policy become - unable to operate on any partially operational cluster. - - diff --git a/releasenotes/notes/forbid-cluster-deletion-a8b0f55aaf0aa106.yaml b/releasenotes/notes/forbid-cluster-deletion-a8b0f55aaf0aa106.yaml deleted file mode 100644 index a833e8560..000000000 --- a/releasenotes/notes/forbid-cluster-deletion-a8b0f55aaf0aa106.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - A cluster in the middle of an on-going action should not be deletable. - The engine service has been improved to detect this situation. diff --git a/releasenotes/notes/force-delete-0b185ea6d70ed81e.yaml b/releasenotes/notes/force-delete-0b185ea6d70ed81e.yaml deleted file mode 100644 index 9f09c0c54..000000000 --- a/releasenotes/notes/force-delete-0b185ea6d70ed81e.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Support for forced deletion of clusters and nodes. diff --git a/releasenotes/notes/gc-for-dead-engine-2246c714edc9a2df.yaml b/releasenotes/notes/gc-for-dead-engine-2246c714edc9a2df.yaml deleted file mode 100644 index 2954968dd..000000000 --- a/releasenotes/notes/gc-for-dead-engine-2246c714edc9a2df.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - When an engine is detected to be dead, the actions (and the clusters/nodes - locked by those actions) are now unlocked. Such clusters and nodes can be - operated again. diff --git a/releasenotes/notes/health-add-cleanup-2d5143ec2bb78e55.yaml b/releasenotes/notes/health-add-cleanup-2d5143ec2bb78e55.yaml deleted file mode 100644 index d5ff2bf16..000000000 --- a/releasenotes/notes/health-add-cleanup-2d5143ec2bb78e55.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Adds a cleanup task on the health manager to help prevent orphaned health checks. diff --git a/releasenotes/notes/health-check-interval-b3850c072600bfdf.yaml b/releasenotes/notes/health-check-interval-b3850c072600bfdf.yaml deleted file mode 100644 index 7d4d6b24f..000000000 --- a/releasenotes/notes/health-check-interval-b3850c072600bfdf.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - The sample health policy file was using 60 seconds as the interval which could - be misleading. This has been tuned to 600 seconds.
diff --git a/releasenotes/notes/health-lb-polling-32d83803c77cc1d8.yaml b/releasenotes/notes/health-lb-polling-32d83803c77cc1d8.yaml deleted file mode 100644 index ac4edb031..000000000 --- a/releasenotes/notes/health-lb-polling-32d83803c77cc1d8.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Removed LB_STATUS_POLLING from health policy since LBaaS still cannot - provide reliable node status updates. diff --git a/releasenotes/notes/health-manager-fixes-d5955f9af88102fc.yaml b/releasenotes/notes/health-manager-fixes-d5955f9af88102fc.yaml deleted file mode 100644 index 0b21b5924..000000000 --- a/releasenotes/notes/health-manager-fixes-d5955f9af88102fc.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -fixes: - - | - Fixes the logic within the health manager to prevent duplicate health checks - from running on the same cluster. -other: - - | - Adds a configuration option to the health manager to control the maximum - number of threads that can be created by the health manager. diff --git a/releasenotes/notes/health-manager-listener-8ddbe169e510031b.yaml b/releasenotes/notes/health-manager-listener-8ddbe169e510031b.yaml deleted file mode 100644 index 427046996..000000000 --- a/releasenotes/notes/health-manager-listener-8ddbe169e510031b.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - The cluster health manager has gained a new feature where nova server - instance failures can be detected and handled, with and without a - health policy attached to a cluster. diff --git a/releasenotes/notes/health-policy-actions-936db8bc3ed08aec.yaml b/releasenotes/notes/health-policy-actions-936db8bc3ed08aec.yaml deleted file mode 100644 index ca19d0ff8..000000000 --- a/releasenotes/notes/health-policy-actions-936db8bc3ed08aec.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Health policy recovery actions now contain a list of dictionaries instead - of a list of simple names. This is to make room for workflow invocations. -fixes: - - The health policy recovery actions are designed to be a list but the current - implementation can only handle one action. This is now explicitly checked. diff --git a/releasenotes/notes/health-policy-mutiple-detection-types-10bfdc80771278cb.yaml b/releasenotes/notes/health-policy-mutiple-detection-types-10bfdc80771278cb.yaml deleted file mode 100644 index 3ea5a89fb..000000000 --- a/releasenotes/notes/health-policy-mutiple-detection-types-10bfdc80771278cb.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -prelude: > - Health policy v1.1 implements multiple detection modes. This implementation - is incompatible with health policy v1.0. -features: - - | - [`blueprint multiple-detection-modes `_] - Health policy v1.1 now supports multiple detection types. The user can - combine node status poll and node poll url types in the health policy in - order to have both checked before a node is considered unhealthy. -upgrade: - - | - This release makes changes to the health policy properties that are - incompatible with health policy v1.0. Any existing policies of type - health policy v1.0 must be removed before upgrading to this release. - After upgrading, the health policies conforming to v1.0 must be recreated - following the health policy v1.1 format.
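A minimal sketch of a health policy spec in the v1.1 format described by the multiple-detection-modes note above; the exact property nesting is a best-effort assumption rather than a copy from the removed docs::

    type: senlin.policy.health
    version: 1.1
    properties:
      detection:
        interval: 600
        detection_modes:
          # Both modes must report a node unhealthy before recovery starts.
          - type: NODE_STATUS_POLLING
          - type: NODE_STATUS_POLL_URL
            poll_url: "http://{nodename}/healthz"   # illustrative endpoint
      recovery:
        actions:
          - name: RECREATE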
diff --git a/releasenotes/notes/health-policy-properties-056d5b4aa63312c9.yaml b/releasenotes/notes/health-policy-properties-056d5b4aa63312c9.yaml deleted file mode 100644 index cb4e6b612..000000000 --- a/releasenotes/notes/health-policy-properties-056d5b4aa63312c9.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - The unimplemented properties for health policy are masked out. diff --git a/releasenotes/notes/health-policy-suspend-7aa33fc981c0f2c9.yaml b/releasenotes/notes/health-policy-suspend-7aa33fc981c0f2c9.yaml deleted file mode 100644 index da415a2de..000000000 --- a/releasenotes/notes/health-policy-suspend-7aa33fc981c0f2c9.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - The health policy was improved so that it will suspend itself when - a node deletion comes from senlin-engine or client request. The policy - will only take effect when the node failure is 'unexpected'. diff --git a/releasenotes/notes/health-poll-url-236392171bb28b3f.yaml b/releasenotes/notes/health-poll-url-236392171bb28b3f.yaml deleted file mode 100644 index b30784abd..000000000 --- a/releasenotes/notes/health-poll-url-236392171bb28b3f.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Health policy now contains NODE_STATUS_POLL_URL detection type. This - detection type queries the URL specified in the health policy for node - health status. This allows the user to integrate Senlin health checks - with an external health service. -other: - - Health policy v1.0 was moved from EXPERIMENTAL to SUPPORTED status. diff --git a/releasenotes/notes/health-poll-url-detection-c6f10065a076510dr.yaml b/releasenotes/notes/health-poll-url-detection-c6f10065a076510dr.yaml deleted file mode 100644 index e306168e6..000000000 --- a/releasenotes/notes/health-poll-url-detection-c6f10065a076510dr.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added a new detection type that actively polls the node health using a URL specified in the - health policy. That way the user can integrate Senlin's health policy with another custom or - third-party health check service. diff --git a/releasenotes/notes/health-reboot-9f74c263f7fb6767.yaml b/releasenotes/notes/health-reboot-9f74c263f7fb6767.yaml deleted file mode 100644 index 29d900130..000000000 --- a/releasenotes/notes/health-reboot-9f74c263f7fb6767.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - A new recovery action "REBOOT" has been added to the health policy. diff --git a/releasenotes/notes/health-recover-9aecfbf2d799abfb.yaml b/releasenotes/notes/health-recover-9aecfbf2d799abfb.yaml deleted file mode 100644 index e25c741b7..000000000 --- a/releasenotes/notes/health-recover-9aecfbf2d799abfb.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed a bug related to reacting to nova VM lifecycle event notifications. - The recover flow is no longer called twice when a VM is deleted. diff --git a/releasenotes/notes/heat-listener-b908d0988840e1f3.yaml b/releasenotes/notes/heat-listener-b908d0988840e1f3.yaml deleted file mode 100644 index 70bfb335d..000000000 --- a/releasenotes/notes/heat-listener-b908d0988840e1f3.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added support to listen to heat event notifications for stack failure - detection.
diff --git a/releasenotes/notes/keystone-conformance-4e729da9e88b4fb3.yaml b/releasenotes/notes/keystone-conformance-4e729da9e88b4fb3.yaml deleted file mode 100644 index 3d14c26d8..000000000 --- a/releasenotes/notes/keystone-conformance-4e729da9e88b4fb3.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - For resources which have a user, a project and a domain property, the - lengths of these columns are increased from 32 chars to 64 chars for - better conformance with Keystone. diff --git a/releasenotes/notes/kube-token-gen-673ea5c0d26d6872.yaml b/releasenotes/notes/kube-token-gen-673ea5c0d26d6872.yaml deleted file mode 100644 index 6db478e51..000000000 --- a/releasenotes/notes/kube-token-gen-673ea5c0d26d6872.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed the error in token generation for kubeadm. diff --git a/releasenotes/notes/kubernetes-dependents-1d7a70aa43ee8aa4.yaml b/releasenotes/notes/kubernetes-dependents-1d7a70aa43ee8aa4.yaml deleted file mode 100644 index 86cd3921f..000000000 --- a/releasenotes/notes/kubernetes-dependents-1d7a70aa43ee8aa4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added dependency relationship between the master cluster and the worker - cluster created for Kubernetes. diff --git a/releasenotes/notes/lb-name-instead-id-f30d4f4e05d350cb.yaml b/releasenotes/notes/lb-name-instead-id-f30d4f4e05d350cb.yaml deleted file mode 100644 index 7abcb9d1b..000000000 --- a/releasenotes/notes/lb-name-instead-id-f30d4f4e05d350cb.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - When the load balancer policy is attached to a cluster, members are - added to the pool by member name. diff --git a/releasenotes/notes/lb-node-actions-95545338ae622f5c.yaml b/releasenotes/notes/lb-node-actions-95545338ae622f5c.yaml deleted file mode 100644 index 63e47204a..000000000 --- a/releasenotes/notes/lb-node-actions-95545338ae622f5c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - The load-balancing policy is improved to handle 'NODE_CREATE' and - 'NODE_DELETE' actions that derive from 'node_create' or 'node_delete' - RPC requests directly. diff --git a/releasenotes/notes/lb-policy-02782a1b98142742.yaml b/releasenotes/notes/lb-policy-02782a1b98142742.yaml deleted file mode 100644 index e22fe82de..000000000 --- a/releasenotes/notes/lb-policy-02782a1b98142742.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed an error in the built-in load-balancing policy that was caused by a - regression in getting node details for IP addresses. diff --git a/releasenotes/notes/lb-policy-improve-165680731fb76681.yaml b/releasenotes/notes/lb-policy-improve-165680731fb76681.yaml deleted file mode 100644 index b72fb9f97..000000000 --- a/releasenotes/notes/lb-policy-improve-165680731fb76681.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed various problems in the load-balancer policy so that it can handle - node-recover and cluster-recover operations properly. diff --git a/releasenotes/notes/lb-policy-improvement-2c18577717d28bb5.yaml b/releasenotes/notes/lb-policy-improvement-2c18577717d28bb5.yaml deleted file mode 100644 index fc438314e..000000000 --- a/releasenotes/notes/lb-policy-improvement-2c18577717d28bb5.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added support to reuse an existing loadbalancer when attaching the LB policy. -fixes: - - Fixed various defects in managing node pools for the loadbalancer policy.
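As an illustration of the keystone-conformance change above, widening such columns in a migration could look like the sketch below. This is hypothetical: the table name, column names and migration style are assumptions for illustration, not the project's actual migration script.

.. code-block:: python

   # Hypothetical sketch: widen user/project/domain columns from 32 to 64
   # chars for better Keystone conformance. Names are illustrative only.
   import sqlalchemy as sa


   def upgrade(migrate_engine):
       meta = sa.MetaData(bind=migrate_engine)
       cluster = sa.Table('cluster', meta, autoload=True)
       for col in ('user', 'project', 'domain'):
           # sqlalchemy-migrate style in-place column alteration
           cluster.c[col].alter(type=sa.String(64))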
diff --git a/releasenotes/notes/lb-project-restriction-688833a1aec6f04e.yaml b/releasenotes/notes/lb-project-restriction-688833a1aec6f04e.yaml deleted file mode 100644 index 7fbf29b02..000000000 --- a/releasenotes/notes/lb-project-restriction-688833a1aec6f04e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Bypass lb project restriction for get_details in LBaaS driver. diff --git a/releasenotes/notes/lb-support-to-recover-8f822d3c2665e225.yaml b/releasenotes/notes/lb-support-to-recover-8f822d3c2665e225.yaml deleted file mode 100644 index 72290edff..000000000 --- a/releasenotes/notes/lb-support-to-recover-8f822d3c2665e225.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - The load-balancing policy now properly supports the CLUSTER_RECOVER action - and NODE_RECOVER action. diff --git a/releasenotes/notes/lb-timeout-option-990ba1f359b5daab.yaml b/releasenotes/notes/lb-timeout-option-990ba1f359b5daab.yaml deleted file mode 100644 index 0e3e00e7d..000000000 --- a/releasenotes/notes/lb-timeout-option-990ba1f359b5daab.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new "lb_status_timeout" option is added to the LB policy to cope - with load-balancers that are not so responsive. diff --git a/releasenotes/notes/lifecycle-hook-19a9bf85b534107d.yaml b/releasenotes/notes/lifecycle-hook-19a9bf85b534107d.yaml deleted file mode 100644 index 3c6127d38..000000000 --- a/releasenotes/notes/lifecycle-hook-19a9bf85b534107d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - New version of deletion policy (v1.1) is implemented which supports the - specification of lifecycle hooks to be invoked before shrinking the size - of a cluster. For details, please check the policy documentation. diff --git a/releasenotes/notes/loadbalancer-octavia-8ab8be9f703781d1.yaml b/releasenotes/notes/loadbalancer-octavia-8ab8be9f703781d1.yaml deleted file mode 100644 index e6d4bfe1d..000000000 --- a/releasenotes/notes/loadbalancer-octavia-8ab8be9f703781d1.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added support to Octavia as the load-balancer driver. -upgrade: - - The Octavia service must be properly installed and configured to enable - load-balancing policy. diff --git a/releasenotes/notes/lock-break-for-dead-service-0abd3d3ea333622c.yaml b/releasenotes/notes/lock-break-for-dead-service-0abd3d3ea333622c.yaml deleted file mode 100644 index 9954b6439..000000000 --- a/releasenotes/notes/lock-break-for-dead-service-0abd3d3ea333622c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -critical: - - The problem of having clusters or nodes still locked by actions executed - by a dead engine is fixed. diff --git a/releasenotes/notes/lock-retry-4d1c52ff4d42a3f9.yaml b/releasenotes/notes/lock-retry-4d1c52ff4d42a3f9.yaml deleted file mode 100644 index 35f42f796..000000000 --- a/releasenotes/notes/lock-retry-4d1c52ff4d42a3f9.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - DB lock contentions are alleviated by allowing lock retries. diff --git a/releasenotes/notes/lock-retry-ab31681e74997cf9.yaml b/releasenotes/notes/lock-retry-ab31681e74997cf9.yaml deleted file mode 100644 index b43670964..000000000 --- a/releasenotes/notes/lock-retry-ab31681e74997cf9.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Fixed cluster and node lock management so that failed lock acquire - operations are automatically retried. This is an important fix for - running multiple service engines. 
diff --git a/releasenotes/notes/message-receiver-3432826515f8e70c.yaml b/releasenotes/notes/message-receiver-3432826515f8e70c.yaml deleted file mode 100644 index f9ba2bb2b..000000000 --- a/releasenotes/notes/message-receiver-3432826515f8e70c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a new type of receiver (i.e. message) which is based on Zaqar - message queue. diff --git a/releasenotes/notes/message-topic-7c642cff317f2bc7.yaml b/releasenotes/notes/message-topic-7c642cff317f2bc7.yaml deleted file mode 100644 index 301959382..000000000 --- a/releasenotes/notes/message-topic-7c642cff317f2bc7.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new configuration option is exposed for the message topic to use when - sending event notifications. diff --git a/releasenotes/notes/metadata-query-profile-9c45d99db7b30207.yaml b/releasenotes/notes/metadata-query-profile-9c45d99db7b30207.yaml deleted file mode 100644 index be60adc05..000000000 --- a/releasenotes/notes/metadata-query-profile-9c45d99db7b30207.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Removed 'metadata' from profile query parameters because the current - support is known to have issues. diff --git a/releasenotes/notes/more-policy-validation-ace6a4f890b2a500.yaml b/releasenotes/notes/more-policy-validation-ace6a4f890b2a500.yaml deleted file mode 100644 index e8603d08b..000000000 --- a/releasenotes/notes/more-policy-validation-ace6a4f890b2a500.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The region placement policy and the zone placement policy have been - augmented with spec validation support. diff --git a/releasenotes/notes/more-server-operations-dd77e83b705c28f0.yaml b/releasenotes/notes/more-server-operations-dd77e83b705c28f0.yaml deleted file mode 100644 index f8f026bf5..000000000 --- a/releasenotes/notes/more-server-operations-dd77e83b705c28f0.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Many new operations are added to os.nova.server profile type. These - operations can be shown using the "profile-type-ops" API. diff --git a/releasenotes/notes/new-api-doc-f21eb0a9f53d7643.yaml b/releasenotes/notes/new-api-doc-f21eb0a9f53d7643.yaml deleted file mode 100644 index 244d97bff..000000000 --- a/releasenotes/notes/new-api-doc-f21eb0a9f53d7643.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - Reworked API documentation which is now published at - https://developer.openstack.org/api-ref/clustering diff --git a/releasenotes/notes/new-config-options-a963e5841d35ef03.yaml b/releasenotes/notes/new-config-options-a963e5841d35ef03.yaml deleted file mode 100644 index e6a35887a..000000000 --- a/releasenotes/notes/new-config-options-a963e5841d35ef03.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -features: - - | - New configuration option "database_retry_limit" is added for customizing - the maximum retries for failed operations on the database. The default - value is 10. - - | - New configuration option "database_retry_interval" is added for specifying - the number of seconds between database operation retries. The default - value is 0.1. - - | - New configuration option "database_max_retry_interval" is added for users - to specify the maximum number of seconds between database operation retries. - The default value is 2. 
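Putting the three database retry options above together, a ``senlin.conf`` snippet using the documented defaults would look like the following (assuming the options live in the ``[DEFAULT]`` section):

.. code-block:: ini

   [DEFAULT]
   # Maximum retries for failed database operations (default: 10).
   database_retry_limit = 10
   # Seconds between database operation retries (default: 0.1).
   database_retry_interval = 0.1
   # Maximum number of seconds between retries (default: 2).
   database_max_retry_interval = 2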
diff --git a/releasenotes/notes/new-node-create-08fe53674b0baab2.yaml b/releasenotes/notes/new-node-create-08fe53674b0baab2.yaml deleted file mode 100644 index 5ca22383a..000000000 --- a/releasenotes/notes/new-node-create-08fe53674b0baab2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - A node creation request that might break cluster size constraints now results - in node ERROR status. diff --git a/releasenotes/notes/node-action-logic-4d3e94818cccaa3e.yaml b/releasenotes/notes/node-action-logic-4d3e94818cccaa3e.yaml deleted file mode 100644 index 291d6d181..000000000 --- a/releasenotes/notes/node-action-logic-4d3e94818cccaa3e.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - The node action execution logic is fixed so that cluster checking is - skipped for orphan nodes and policy checking is skipped for derived - node actions. diff --git a/releasenotes/notes/node-adopt-289a3cea24d8eb78.yaml b/releasenotes/notes/node-adopt-289a3cea24d8eb78.yaml deleted file mode 100644 index be7b26d20..000000000 --- a/releasenotes/notes/node-adopt-289a3cea24d8eb78.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added support to adopt an existing object as a Senlin node given the UUID - and profile type to use. diff --git a/releasenotes/notes/node-check-50d4b67796e17afb.yaml b/releasenotes/notes/node-check-50d4b67796e17afb.yaml deleted file mode 100644 index 2f652248e..000000000 --- a/releasenotes/notes/node-check-50d4b67796e17afb.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed an error in the parameter checking logic for the node-recover operation - which prevented valid parameters from being accepted. diff --git a/releasenotes/notes/node-check-before-recover-abf887a39ab0d355.yaml b/releasenotes/notes/node-check-before-recover-abf887a39ab0d355.yaml deleted file mode 100644 index f59a0c0b8..000000000 --- a/releasenotes/notes/node-check-before-recover-abf887a39ab0d355.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - API microversion 1.6 comes with an optional parameter 'check' that tells - the engine to perform a health check before doing actual recovery. - This applies to both clusters and nodes. diff --git a/releasenotes/notes/node-create-affinity-ec126ccd3e9e0957.yaml b/releasenotes/notes/node-create-affinity-ec126ccd3e9e0957.yaml deleted file mode 100644 index 89bf84858..000000000 --- a/releasenotes/notes/node-create-affinity-ec126ccd3e9e0957.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The affinity policy is improved to handle NODE_CREATE actions which are - derived from 'node_create' RPC requests. diff --git a/releasenotes/notes/node-create-az-d886dea98a25229f.yaml b/releasenotes/notes/node-create-az-d886dea98a25229f.yaml deleted file mode 100644 index 7ad1b7572..000000000 --- a/releasenotes/notes/node-create-az-d886dea98a25229f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The availability-zone placement policy is improved to handle NODE_CREATE - actions which are derived from 'node_create' RPC requests. diff --git a/releasenotes/notes/node-create-region-0cbac0918c703e27.yaml b/releasenotes/notes/node-create-region-0cbac0918c703e27.yaml deleted file mode 100644 index 1bb4be684..000000000 --- a/releasenotes/notes/node-create-region-0cbac0918c703e27.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The region placement policy is improved to handle the NODE_CREATE action - which derives from a 'node_create' RPC request.
diff --git a/releasenotes/notes/node-delete-force-e4a69831af0b145d.yaml b/releasenotes/notes/node-delete-force-e4a69831af0b145d.yaml deleted file mode 100644 index ab515e890..000000000 --- a/releasenotes/notes/node-delete-force-e4a69831af0b145d.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed a bug related to force deleting nodes. diff --git a/releasenotes/notes/node-detail-volumes-8e29c734f4f43442.yaml b/releasenotes/notes/node-detail-volumes-8e29c734f4f43442.yaml deleted file mode 100644 index f46893d34..000000000 --- a/releasenotes/notes/node-detail-volumes-8e29c734f4f43442.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Node details view now includes attached_volumes. diff --git a/releasenotes/notes/node-health-check-0c94b9fecf35e677.yaml b/releasenotes/notes/node-health-check-0c94b9fecf35e677.yaml deleted file mode 100644 index 6a72017b6..000000000 --- a/releasenotes/notes/node-health-check-0c94b9fecf35e677.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -other: - - Improved Nova VM server health check for cases where the physical ID is invalid. diff --git a/releasenotes/notes/node-join-leave-8b00f64cf55b675a.yaml b/releasenotes/notes/node-join-leave-8b00f64cf55b675a.yaml deleted file mode 100644 index 54ab82a42..000000000 --- a/releasenotes/notes/node-join-leave-8b00f64cf55b675a.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Added exception handling for node-join and node-leave operations. diff --git a/releasenotes/notes/node-name-formatter-284b768be7fbe6c6.yaml b/releasenotes/notes/node-name-formatter-284b768be7fbe6c6.yaml deleted file mode 100644 index ea22d83b0..000000000 --- a/releasenotes/notes/node-name-formatter-284b768be7fbe6c6.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added cluster config property "node.name.format" where users can specify - how cluster nodes are automatically named. Users can use placeholders like - "$nI" for the node index padded with 0s to the left, or "$nR" for a random string - of length n. diff --git a/releasenotes/notes/node-op-api-a7bede34c51854ee.yaml b/releasenotes/notes/node-op-api-a7bede34c51854ee.yaml deleted file mode 100644 index 005797a33..000000000 --- a/releasenotes/notes/node-op-api-a7bede34c51854ee.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a new `node-operation` API for performing profile type supported - operations on a node. diff --git a/releasenotes/notes/node-op-return-value-73720cf91b6e2672.yaml b/releasenotes/notes/node-op-return-value-73720cf91b6e2672.yaml deleted file mode 100644 index 495196d07..000000000 --- a/releasenotes/notes/node-op-return-value-73720cf91b6e2672.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixed the return value from a node operation call. diff --git a/releasenotes/notes/node-ops-115d9d64f6e261db.yaml b/releasenotes/notes/node-ops-115d9d64f6e261db.yaml deleted file mode 100644 index 28b4ffc75..000000000 --- a/releasenotes/notes/node-ops-115d9d64f6e261db.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - New API "node-op" is introduced for triggering profile type specific - operations on a node. This is available since API micro-version 1.4. diff --git a/releasenotes/notes/node-physical-id-f3393fb1a1eba4f7.yaml b/releasenotes/notes/node-physical-id-f3393fb1a1eba4f7.yaml deleted file mode 100644 index b42e82ed3..000000000 --- a/releasenotes/notes/node-physical-id-f3393fb1a1eba4f7.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Relaxed the constraint on the node physical_id property. Any string value is now - treated as a valid value even if it is not a UUID.
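To sketch the node naming feature above: with ``node.name.format`` set in the cluster config, placeholder expansion would work as follows (the placement under a ``config`` key is an assumption based on the note):

.. code-block:: yaml

   # Hypothetical cluster config using the naming placeholders above.
   config:
     node.name.format: "web-$4I"   # would yield web-0001, web-0002, ...
     # "web-$5R" would instead append a random 5-character string.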
diff --git a/releasenotes/notes/node-recover-ace5311e23030f20.yaml b/releasenotes/notes/node-recover-ace5311e23030f20.yaml deleted file mode 100644 index fdf06bdc4..000000000 --- a/releasenotes/notes/node-recover-ace5311e23030f20.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - Fixed defects in the node recover operation to ensure the node status is properly - handled. - - | - Improved logic in rebooting and rebuilding nova server nodes so that - exceptions are caught and handled. diff --git a/releasenotes/notes/node-recover-fix-cc054c3f763654a0.yaml b/releasenotes/notes/node-recover-fix-cc054c3f763654a0.yaml deleted file mode 100644 index a14c07db2..000000000 --- a/releasenotes/notes/node-recover-fix-cc054c3f763654a0.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed an error where the action name was not passed to the backend service. diff --git a/releasenotes/notes/node-role-fix-211d1536dd66066d.yaml b/releasenotes/notes/node-role-fix-211d1536dd66066d.yaml deleted file mode 100644 index 43c1f0675..000000000 --- a/releasenotes/notes/node-role-fix-211d1536dd66066d.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - | - Fixed the "role" field used when creating/updating a node. diff --git a/releasenotes/notes/node-tainted-1d1c0f885cd3e4a8.yaml b/releasenotes/notes/node-tainted-1d1c0f885cd3e4a8.yaml deleted file mode 100644 index 5e1231643..000000000 --- a/releasenotes/notes/node-tainted-1d1c0f885cd3e4a8.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - Added a 'tainted' field to nodes. A node with tainted set to True will be - selected first for scale-in operations. diff --git a/releasenotes/notes/node-update-timestamp-43b9639e22267598.yaml b/releasenotes/notes/node-update-timestamp-43b9639e22267598.yaml deleted file mode 100644 index 829f087c9..000000000 --- a/releasenotes/notes/node-update-timestamp-43b9639e22267598.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed the problem that the "updated_at" timestamp of a node was not - correctly updated. diff --git a/releasenotes/notes/non-operation-recover-cf0f3c0ac62bb0f3.yaml b/releasenotes/notes/non-operation-recover-cf0f3c0ac62bb0f3.yaml deleted file mode 100644 index 7f9c05789..000000000 --- a/releasenotes/notes/non-operation-recover-cf0f3c0ac62bb0f3.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed an error that was raised when no operation is provided during node health - recovery. diff --git a/releasenotes/notes/notification-operations-c7bdaa9b56e5011f.yaml b/releasenotes/notes/notification-operations-c7bdaa9b56e5011f.yaml deleted file mode 100644 index 336611a58..000000000 --- a/releasenotes/notes/notification-operations-c7bdaa9b56e5011f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The notifications of profile type specific operations were not properly - reporting the operation's name. This has been fixed. diff --git a/releasenotes/notes/notification-retry-logic-cb9933b4826c9d45.yaml b/releasenotes/notes/notification-retry-logic-cb9933b4826c9d45.yaml deleted file mode 100644 index 5ed548e38..000000000 --- a/releasenotes/notes/notification-retry-logic-cb9933b4826c9d45.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added retry logic to post_lifecycle_hook_message when posting a lifecycle hook to Zaqar.
- diff --git a/releasenotes/notes/notification-support-a7e2ebc816bb4009.yaml b/releasenotes/notes/notification-support-a7e2ebc816bb4009.yaml deleted file mode 100644 index 93c92edc4..000000000 --- a/releasenotes/notes/notification-support-a7e2ebc816bb4009.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Event notifications (versioned) are added to enable senlin-engine to send - out messaging events when configured. The old event repo is adapted to - follow the same design. -upgrade: - - New setup configuration items are provided to enable the "message" and/or - "database" event generation. diff --git a/releasenotes/notes/notification-transport-ae49e9cb1813cd96.yaml b/releasenotes/notes/notification-transport-ae49e9cb1813cd96.yaml deleted file mode 100644 index c87173ac8..000000000 --- a/releasenotes/notes/notification-transport-ae49e9cb1813cd96.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed the notification logic so that it uses the proper transport obtained - from oslo.messaging. diff --git a/releasenotes/notes/nova-az-fccf8db758642d34.yaml b/releasenotes/notes/nova-az-fccf8db758642d34.yaml deleted file mode 100644 index e00cc17b7..000000000 --- a/releasenotes/notes/nova-az-fccf8db758642d34.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed an error introduced by openstacksdk when checking/setting the - availability zone of a nova server. diff --git a/releasenotes/notes/nova-get-image-726aa195c17a294f.yaml b/releasenotes/notes/nova-get-image-726aa195c17a294f.yaml deleted file mode 100644 index b9fc11f18..000000000 --- a/releasenotes/notes/nova-get-image-726aa195c17a294f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed the nova profile logic when updating the image. We will always use the - current image as the effective one. diff --git a/releasenotes/notes/nova-metadata-fix-89b7a2e06c3ce59f.yaml b/releasenotes/notes/nova-metadata-fix-89b7a2e06c3ce59f.yaml deleted file mode 100644 index 0aed0aa75..000000000 --- a/releasenotes/notes/nova-metadata-fix-89b7a2e06c3ce59f.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed a bug introduced by openstacksdk when updating nova server metadata. diff --git a/releasenotes/notes/nova-metadata-update-d1ab297f0e998117.yaml b/releasenotes/notes/nova-metadata-update-d1ab297f0e998117.yaml deleted file mode 100644 index 656b1dae8..000000000 --- a/releasenotes/notes/nova-metadata-update-d1ab297f0e998117.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - Simply update the nova server key/value pairs that we need to update rather - than completely deleting and recreating the dictionary from scratch. diff --git a/releasenotes/notes/nova-server-addresses-fd8afddc3fb36a0c.yaml b/releasenotes/notes/nova-server-addresses-fd8afddc3fb36a0c.yaml deleted file mode 100644 index 101b39370..000000000 --- a/releasenotes/notes/nova-server-addresses-fd8afddc3fb36a0c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The 'details/addresses' property of a node output for a nova server used - to contain only some trimmed information. This has been changed to a - faithful dumping of the 'addresses' property.
diff --git a/releasenotes/notes/nova-server-validation-60612c1185738104.yaml b/releasenotes/notes/nova-server-validation-60612c1185738104.yaml deleted file mode 100644 index a3c12856c..000000000 --- a/releasenotes/notes/nova-server-validation-60612c1185738104.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The 'image', 'flavor', 'key_name' and 'networks' properties of a - nova server profile can now be validated via profile-validate API. diff --git a/releasenotes/notes/nova-server-validation-d36dbcf64fb90a43.yaml b/releasenotes/notes/nova-server-validation-d36dbcf64fb90a43.yaml deleted file mode 100644 index 04e74747b..000000000 --- a/releasenotes/notes/nova-server-validation-d36dbcf64fb90a43.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - With the new 'profile-validate' API, the nova server profile now supports - the validation of its 'flavor', 'image' (if provided), 'availability_zone' - and block device driver properties. diff --git a/releasenotes/notes/nova-update-opt-7372e4d189e483aa.yaml b/releasenotes/notes/nova-update-opt-7372e4d189e483aa.yaml deleted file mode 100644 index 67381c719..000000000 --- a/releasenotes/notes/nova-update-opt-7372e4d189e483aa.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Optimized nova server update so that password and server name can be - updated with and without image-based rebuild. diff --git a/releasenotes/notes/nova-update-validation-dca7de984c2071d1.yaml b/releasenotes/notes/nova-update-validation-dca7de984c2071d1.yaml deleted file mode 100644 index ed0aebbf8..000000000 --- a/releasenotes/notes/nova-update-validation-dca7de984c2071d1.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Added validation of key_name, flavor, image, networks when updating nova - server. diff --git a/releasenotes/notes/ocata-2-c2e184a0b76231e8.yaml b/releasenotes/notes/ocata-2-c2e184a0b76231e8.yaml deleted file mode 100644 index aee82737d..000000000 --- a/releasenotes/notes/ocata-2-c2e184a0b76231e8.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Versioned request support in API, RPC and engine layers. - - Basic support for event/notification. - - Enables osprofiler support. - - Rally plugin for cluster scaling in. - - Batch policy support for cluster actions. - - Integration test for message receiver. diff --git a/releasenotes/notes/octavia-network_id-and-subnet_id-changes-9ba43e19ae29ac7d.yaml b/releasenotes/notes/octavia-network_id-and-subnet_id-changes-9ba43e19ae29ac7d.yaml deleted file mode 100644 index 6037a5a30..000000000 --- a/releasenotes/notes/octavia-network_id-and-subnet_id-changes-9ba43e19ae29ac7d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Loadbalancers incorrectly required a VIP subnet, when they should actually - accept either a VIP subnet or VIP network. Now either/both is acceptable. diff --git a/releasenotes/notes/options-shuffled-29c6cfac72aaf8ff.yaml b/releasenotes/notes/options-shuffled-29c6cfac72aaf8ff.yaml deleted file mode 100644 index 240e3deaa..000000000 --- a/releasenotes/notes/options-shuffled-29c6cfac72aaf8ff.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - Several configuration options are consolidated into the 'senlin_api' group - in 'senlin.conf' file ('api_paste_config', 'wsgi_keep_alive', - 'client_socket_timeout', 'max_json_body_size'). 
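After the consolidation described above, the affected options are set under the ``[senlin_api]`` group of ``senlin.conf``. The values below are illustrative only, not the documented defaults:

.. code-block:: ini

   [senlin_api]
   api_paste_config = api-paste.ini
   wsgi_keep_alive = True
   client_socket_timeout = 900
   max_json_body_size = 1048576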
diff --git a/releasenotes/notes/oslo-versioned-object-support-cc9463490306c26f.yaml b/releasenotes/notes/oslo-versioned-object-support-cc9463490306c26f.yaml deleted file mode 100644 index ea485982c..000000000 --- a/releasenotes/notes/oslo-versioned-object-support-cc9463490306c26f.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added support to oslo.versionedobject so that DB interactions are - abstracted. It is now possible to do a live upgrade of the senlin service. diff --git a/releasenotes/notes/param-check-cluster-update-58d4712a33f74c6e.yaml b/releasenotes/notes/param-check-cluster-update-58d4712a33f74c6e.yaml deleted file mode 100644 index fe680e2e7..000000000 --- a/releasenotes/notes/param-check-cluster-update-58d4712a33f74c6e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The parameter checking for the cluster update operation may incorrectly - parse the provided value(s). This bug has been fixed. diff --git a/releasenotes/notes/path-check-collect-1e542762cbcd65d2.yaml b/releasenotes/notes/path-check-collect-1e542762cbcd65d2.yaml deleted file mode 100644 index f703cd8cf..000000000 --- a/releasenotes/notes/path-check-collect-1e542762cbcd65d2.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed a bug related to the cluster-collect API where the path parameter is None. diff --git a/releasenotes/notes/policy-enabling-61d0c38aecf314eb.yaml b/releasenotes/notes/policy-enabling-61d0c38aecf314eb.yaml deleted file mode 100644 index 487ea986d..000000000 --- a/releasenotes/notes/policy-enabling-61d0c38aecf314eb.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - When attaching a policy (especially a health policy) to a cluster, users - may choose to keep the policy disabled. This has to be considered in the - health manager and other places. This issue is fixed. diff --git a/releasenotes/notes/policy-fixes-24857037ac054999.yaml b/releasenotes/notes/policy-fixes-24857037ac054999.yaml deleted file mode 100644 index 3f0a5dee6..000000000 --- a/releasenotes/notes/policy-fixes-24857037ac054999.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed bugs in the deletion policy, zone policy and region policy which were not able - to correctly parse node references. diff --git a/releasenotes/notes/policy-in-code-05970b66eb27481a.yaml b/releasenotes/notes/policy-in-code-05970b66eb27481a.yaml deleted file mode 100644 index 36425344a..000000000 --- a/releasenotes/notes/policy-in-code-05970b66eb27481a.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -features: - - | - Senlin now supports policy in code, which means that if users have not modified - any policy rules, they can leave the policy file (in `json` or `yaml` - format) empty or not deploy it at all, because Senlin now keeps - all default policies under the `senlin/common/policies` module. - Users can modify or generate a `policy.yaml` file which will override the in-code - policy rules for any rules that appear in the `policy.yaml` file. - Users can also still use a `policy.json` file, but the oslo team recommends - using the newer YAML format instead. - -other: - - | - The default `policy.json` file is now removed as Senlin now generates the - default policies from code. Please be aware of this when relying on that file in - your environment.
diff --git a/releasenotes/notes/policy-performance-4d2fa57ccc45bbf1.yaml b/releasenotes/notes/policy-performance-4d2fa57ccc45bbf1.yaml deleted file mode 100644 index 08bd53819..000000000 --- a/releasenotes/notes/policy-performance-4d2fa57ccc45bbf1.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - | - Built-in policies are optimized to reduce DB transactions. diff --git a/releasenotes/notes/policy-retry-251cf15f06368ad4.yaml b/releasenotes/notes/policy-retry-251cf15f06368ad4.yaml deleted file mode 100644 index 49778bc50..000000000 --- a/releasenotes/notes/policy-retry-251cf15f06368ad4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - The policy attach and detach actions are improved to automatically retry - on failed attempts. diff --git a/releasenotes/notes/policy-validate-04cbc74d2c025fcc.yaml b/releasenotes/notes/policy-validate-04cbc74d2c025fcc.yaml deleted file mode 100644 index 695f246e4..000000000 --- a/releasenotes/notes/policy-validate-04cbc74d2c025fcc.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new policy-validate API has been added to validate the spec of a - policy without actually creating an instance of it. diff --git a/releasenotes/notes/policy-validation-477a103aa83835f9.yaml b/releasenotes/notes/policy-validation-477a103aa83835f9.yaml deleted file mode 100644 index 6e42ceadb..000000000 --- a/releasenotes/notes/policy-validation-477a103aa83835f9.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The affinity policy and load-balancing policy now support spec validation. - Invalid properties can be detected using the policy-validate API. diff --git a/releasenotes/notes/profile-only-update-5cdb3ae46a8139a8.yaml b/releasenotes/notes/profile-only-update-5cdb3ae46a8139a8.yaml deleted file mode 100644 index fcbe887cc..000000000 --- a/releasenotes/notes/profile-only-update-5cdb3ae46a8139a8.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - | - A new feature is introduced in API microversion 1.6 which permits a cluster - update operation to change only the profile used by the cluster, without - actually updating the existing nodes (if any). The new profile will be used - when new nodes are created as members of the cluster. diff --git a/releasenotes/notes/profile-type-ops-1f0f2e6e6b5b1999.yaml b/releasenotes/notes/profile-type-ops-1f0f2e6e6b5b1999.yaml deleted file mode 100644 index 3db3311b0..000000000 --- a/releasenotes/notes/profile-type-ops-1f0f2e6e6b5b1999.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new API "profile-type-ops" is introduced to expose the schema of profile - type specific operations to end users. diff --git a/releasenotes/notes/profile-validate-45a9bc520880bc6b.yaml b/releasenotes/notes/profile-validate-45a9bc520880bc6b.yaml deleted file mode 100644 index f67093ae2..000000000 --- a/releasenotes/notes/profile-validate-45a9bc520880bc6b.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - A new profile-validate API has been added to validate the spec of a - profile without actually creating an instance of it. diff --git a/releasenotes/notes/receiver-create-71ae7367427bf81c.yaml b/releasenotes/notes/receiver-create-71ae7367427bf81c.yaml deleted file mode 100644 index a0a37826d..000000000 --- a/releasenotes/notes/receiver-create-71ae7367427bf81c.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed an error introduced by an oslo.versionedobjects change that led to - failures when creating a receiver.
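A request body for the profile-only cluster update described above might look like the sketch below. The ``profile_only`` flag name is inferred from the note rather than verified against the retired API, and the request would need to carry the clustering API microversion 1.6 or later:

.. code-block:: json

   {
     "cluster": {
       "profile_id": "my-new-profile",
       "profile_only": true
     }
   }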
diff --git a/releasenotes/notes/receiver-create-check-2225f536f5150065.yaml b/releasenotes/notes/receiver-create-check-2225f536f5150065.yaml deleted file mode 100644 index 5afc0033f..000000000 --- a/releasenotes/notes/receiver-create-check-2225f536f5150065.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - With the newly added 'message' type of receivers, the 'cluster' and - the 'action' property are not always required when creating a receiver. - They are still required if the receiver type is 'webhook' (the default). diff --git a/releasenotes/notes/receiver-create-trust-bd5fdeb059e68330.yaml b/releasenotes/notes/receiver-create-trust-bd5fdeb059e68330.yaml deleted file mode 100644 index 920e2cfdf..000000000 --- a/releasenotes/notes/receiver-create-trust-bd5fdeb059e68330.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -fixes: - - Fixed bugs related to receiver creation when type is set to 'message'. diff --git a/releasenotes/notes/receiver-filter-by-user-ab35a2ab8e2690d1.yaml b/releasenotes/notes/receiver-filter-by-user-ab35a2ab8e2690d1.yaml deleted file mode 100644 index c8d17432f..000000000 --- a/releasenotes/notes/receiver-filter-by-user-ab35a2ab8e2690d1.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The receiver list can now be filtered by the user who created it. This did not - work before. diff --git a/releasenotes/notes/receiver-update-f97dc556ce3bf22e.yaml b/releasenotes/notes/receiver-update-f97dc556ce3bf22e.yaml deleted file mode 100644 index 55ecf1d03..000000000 --- a/releasenotes/notes/receiver-update-f97dc556ce3bf22e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - New operation introduced for updating the parameters of a receiver. diff --git a/releasenotes/notes/receiver-webhook-d972369731a6ed72.yaml b/releasenotes/notes/receiver-webhook-d972369731a6ed72.yaml deleted file mode 100644 index b26e0e098..000000000 --- a/releasenotes/notes/receiver-webhook-d972369731a6ed72.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixed a bug related to webhook ID in the channel info of a receiver. - The channel info now always contains a valid webhook ID. diff --git a/releasenotes/notes/receiver-webhook-v2-a7a24ae6720b5151.yaml b/releasenotes/notes/receiver-webhook-v2-a7a24ae6720b5151.yaml deleted file mode 100644 index 8e413ed7e..000000000 --- a/releasenotes/notes/receiver-webhook-v2-a7a24ae6720b5151.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - | - Added webhook v2 support: previously, the webhook API introduced microversion 1.10 - to allow callers to pass arbitrary data in the body along with the webhook - call. This was done so that webhooks would work with aodh again. - However, aodh and most webhook callers cannot pass in the header necessary - to specify the microversion. Thus, we introduce webhook v2 so that webhooks - behave as in microversion 1.10 but without the need to specify that - microversion header. - - diff --git a/releasenotes/notes/remove-bdm-v1-4533677f3bca3c5d.yaml b/releasenotes/notes/remove-bdm-v1-4533677f3bca3c5d.yaml deleted file mode 100644 index ec264fb3f..000000000 --- a/releasenotes/notes/remove-bdm-v1-4533677f3bca3c5d.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - Deprecated 'block_device_mapping' in the nova server profile since it was - never supported by the OpenStack SDK.
diff --git a/releasenotes/notes/remove-py35-test-bc81b608d6afeb4a.yaml b/releasenotes/notes/remove-py35-test-bc81b608d6afeb4a.yaml deleted file mode 100644 index 8820c4ef5..000000000 --- a/releasenotes/notes/remove-py35-test-bc81b608d6afeb4a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - All the integration testing has now been moved to Bionic, and py3.5 is not a - tested runtime for Train or stable/stein. diff --git a/releasenotes/notes/requirement-update-941ebb5825ee9f29.yaml b/releasenotes/notes/requirement-update-941ebb5825ee9f29.yaml deleted file mode 100644 index b6f4d5836..000000000 --- a/releasenotes/notes/requirement-update-941ebb5825ee9f29.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -other: - - | - Updated the sphinx dependency in line with global requirements. It is capped for - Python 2 since sphinx 2.0 no longer supports Python 2.7. - Updated the hacking version to the latest. \ No newline at end of file diff --git a/releasenotes/notes/requirement-update-victoria-3b150cddd189db7d.yaml b/releasenotes/notes/requirement-update-victoria-3b150cddd189db7d.yaml deleted file mode 100644 index 1076b6fb5..000000000 --- a/releasenotes/notes/requirement-update-victoria-3b150cddd189db7d.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -other: - - | - Fix the hacking lower constraint to 3.0.1 - Fix the jsonschema lower constraint to 3.2.0 - Remove the Babel requirement - Remove the six requirement - Remove the mock requirement; use unittest.mock instead diff --git a/releasenotes/notes/resize-params-ab4942dc11f05d9a.yaml b/releasenotes/notes/resize-params-ab4942dc11f05d9a.yaml deleted file mode 100644 index 69c90672c..000000000 --- a/releasenotes/notes/resize-params-ab4942dc11f05d9a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - The parameter checking for the cluster-resize operation is revised so that - min_step will be ignored if the adjustment type is not CHANGE_IN_PERCENTAGE. diff --git a/releasenotes/notes/scaling-policy-validation-e2a1d3049e03c316.yaml b/releasenotes/notes/scaling-policy-validation-e2a1d3049e03c316.yaml deleted file mode 100644 index 61fc6642a..000000000 --- a/releasenotes/notes/scaling-policy-validation-e2a1d3049e03c316.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - The numeric properties in the spec for a scaling policy now have stricter - validations. diff --git a/releasenotes/notes/schedule-improved-6996965f07450b35.yaml b/releasenotes/notes/schedule-improved-6996965f07450b35.yaml deleted file mode 100644 index 5ecdca69f..000000000 --- a/releasenotes/notes/schedule-improved-6996965f07450b35.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The action scheduler has been refactored so that no premature sleeping will - be performed and no unwanted exceptions will be thrown when shutting down - workers. diff --git a/releasenotes/notes/scheduler-enhancement-09f86efe4dde4051.yaml b/releasenotes/notes/scheduler-enhancement-09f86efe4dde4051.yaml deleted file mode 100644 index b8f6efb42..000000000 --- a/releasenotes/notes/scheduler-enhancement-09f86efe4dde4051.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - The engine scheduler was redesigned to work in a "tickless" way.
diff --git a/releasenotes/notes/scheduler-thread-pool-size-40905866197ef8bd.yaml b/releasenotes/notes/scheduler-thread-pool-size-40905866197ef8bd.yaml deleted file mode 100644 index ef4c3aa4c..000000000 --- a/releasenotes/notes/scheduler-thread-pool-size-40905866197ef8bd.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - | - Added a scheduler thread pool size configuration value and changed the default - thread pool size for the scheduler from 10 to 1000. This fix prevents problems - when a large number of cluster operations are executed simultaneously. diff --git a/releasenotes/notes/secure-password-e60243ae2befbbf6.yaml b/releasenotes/notes/secure-password-e60243ae2befbbf6.yaml deleted file mode 100644 index 5b6565279..000000000 --- a/releasenotes/notes/secure-password-e60243ae2befbbf6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -security: - - The configuration option 'service_password' is marked as secret so that - its value won't get leaked into log files. diff --git a/releasenotes/notes/senlin-osprofiler-fc8cb7161bdb1a6e.yaml b/releasenotes/notes/senlin-osprofiler-fc8cb7161bdb1a6e.yaml deleted file mode 100644 index 57f51f17e..000000000 --- a/releasenotes/notes/senlin-osprofiler-fc8cb7161bdb1a6e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Integrated OSProfiler into Senlin, adding support for using OSProfiler to - measure the performance of Senlin. diff --git a/releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml b/releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml deleted file mode 100644 index 29d8e9b91..000000000 --- a/releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -prelude: > - Added new tool ``senlin-status upgrade check``. -features: - - | - A new framework for the ``senlin-status upgrade check`` command is added. - This framework allows adding various checks which can be run before a - Senlin upgrade to ensure the upgrade can be performed safely. -upgrade: - - | - Operators can now use the new CLI tool ``senlin-status upgrade check`` - to check whether a Senlin deployment can be safely upgraded from - the N-1 to the N release. diff --git a/releasenotes/notes/server-image-id-27c1619fa818c6a0.yaml b/releasenotes/notes/server-image-id-27c1619fa818c6a0.yaml deleted file mode 100644 index ae129b27e..000000000 --- a/releasenotes/notes/server-image-id-27c1619fa818c6a0.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - A nova server, if booted from volume, will not return a valid image ID. - This situation is now taken care of. diff --git a/releasenotes/notes/service-cleanup-afacddfacd7b4dcd.yaml b/releasenotes/notes/service-cleanup-afacddfacd7b4dcd.yaml deleted file mode 100644 index 1be846bd4..000000000 --- a/releasenotes/notes/service-cleanup-afacddfacd7b4dcd.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed the dead service clean-up logic so that the clean-up operation can be - retried. diff --git a/releasenotes/notes/service-list-5f4037ae52514f2a.yaml b/releasenotes/notes/service-list-5f4037ae52514f2a.yaml deleted file mode 100644 index 0077b2f37..000000000 --- a/releasenotes/notes/service-list-5f4037ae52514f2a.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - New API introduced to list the running service engines.
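The upgrade check tool above is invoked as shown below; any checks registered in the framework run before the upgrade proceeds (the command name is taken from the note, while the check output itself is not reproduced here):

.. code-block:: console

   $ senlin-status upgrade check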
diff --git a/releasenotes/notes/service-status-report-625bc25b89907e07.yaml b/releasenotes/notes/service-status-report-625bc25b89907e07.yaml deleted file mode 100644 index 8223ffd8f..000000000 --- a/releasenotes/notes/service-status-report-625bc25b89907e07.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The 'senlin-manage' command has been fixed so that it will report the - senlin service status correctly. diff --git a/releasenotes/notes/service-update-2e96dd86295ddfa0.yaml b/releasenotes/notes/service-update-2e96dd86295ddfa0.yaml deleted file mode 100644 index e541bce4d..000000000 --- a/releasenotes/notes/service-update-2e96dd86295ddfa0.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Added exception handling for service status update. This makes service - management more stable. diff --git a/releasenotes/notes/setup-script-648e9bfb89bb6255.yaml b/releasenotes/notes/setup-script-648e9bfb89bb6255.yaml deleted file mode 100644 index d47db0c2a..000000000 --- a/releasenotes/notes/setup-script-648e9bfb89bb6255.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - The setup-service script now supports the customization of service project - name and service role name. diff --git a/releasenotes/notes/skip-lifecycle-completion-b528464e11071666.yaml b/releasenotes/notes/skip-lifecycle-completion-b528464e11071666.yaml deleted file mode 100644 index 77cf58436..000000000 --- a/releasenotes/notes/skip-lifecycle-completion-b528464e11071666.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - | - The lifecycle hooks feature added during the Queens cycle is improved to handle - cases where a node no longer exists. The lifecycle hook is only effective when - the target node exists and is active. diff --git a/releasenotes/notes/split-engine-service-acea7821cadf9d00.yaml b/releasenotes/notes/split-engine-service-acea7821cadf9d00.yaml deleted file mode 100644 index d141bcd12..000000000 --- a/releasenotes/notes/split-engine-service-acea7821cadf9d00.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -prelude: > - The Senlin-Engine was responsible for a large number of threaded - tasks. To help lower the number of potential threads per process and to - make the Engine more resilient, starting with OpenStack Ussuri, the Engine - service has been split into three services, ``senlin-conductor``, - ``senlin-engine`` and ``senlin-health-manager``. -upgrade: - - | - Two new services have been introduced that will need to be started - after the upgrade, ``senlin-conductor`` and ``senlin-health-manager``. - - With the introduction of these new services, new configuration options - were added to allow operators to change the number of processes to spawn. - - .. code-block:: ini - - [conductor] - workers = 1 - - .. - .. code-block:: ini - - [engine] - workers = 1 - - .. - .. code-block:: ini - - [health_manager] - workers = 1 - - .. diff --git a/releasenotes/notes/support-status-f7383a53ddcae908.yaml b/releasenotes/notes/support-status-f7383a53ddcae908.yaml deleted file mode 100644 index 9d6d6bc07..000000000 --- a/releasenotes/notes/support-status-f7383a53ddcae908.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Profile type list and policy type list now return the support status - for each type since API micro-version 1.5.
diff --git a/releasenotes/notes/support-subnet-c2492ce8a377b1af.yaml b/releasenotes/notes/support-subnet-c2492ce8a377b1af.yaml deleted file mode 100644 index f60a0385b..000000000 --- a/releasenotes/notes/support-subnet-c2492ce8a377b1af.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Support creating a profile with a subnet. A subnet can now be defined in the profile; by - default the network port will pick a random subnet to create the port on, but the user can define a specific subnet. diff --git a/releasenotes/notes/support-volume-type-07d608097c711460.yaml b/releasenotes/notes/support-volume-type-07d608097c711460.yaml deleted file mode 100644 index dead28409..000000000 --- a/releasenotes/notes/support-volume-type-07d608097c711460.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Users can now create a volume with a specified volume type via bdm_v2. diff --git a/releasenotes/notes/switch-to-alembic-migrations-f442d0b58c3f13a6.yaml b/releasenotes/notes/switch-to-alembic-migrations-f442d0b58c3f13a6.yaml deleted file mode 100644 index 23cd57c2c..000000000 --- a/releasenotes/notes/switch-to-alembic-migrations-f442d0b58c3f13a6.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - | - Senlin will now use Alembic migrations for database schema updates. diff --git a/releasenotes/notes/tempest-api-test-support-c86091a7ba5fb789.yaml b/releasenotes/notes/tempest-api-test-support-c86091a7ba5fb789.yaml deleted file mode 100644 index a03ab093c..000000000 --- a/releasenotes/notes/tempest-api-test-support-c86091a7ba5fb789.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Tempest API tests for all Senlin API interfaces, covering both positive - and negative cases. diff --git a/releasenotes/notes/tempest-functional-test-383dad4d9acff97e.yaml b/releasenotes/notes/tempest-functional-test-383dad4d9acff97e.yaml deleted file mode 100644 index d951c1c96..000000000 --- a/releasenotes/notes/tempest-functional-test-383dad4d9acff97e.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Reimplemented functional tests using tempest. diff --git a/releasenotes/notes/template-url-19075b68d9a35a80.yaml b/releasenotes/notes/template-url-19075b68d9a35a80.yaml deleted file mode 100644 index 71ae80846..000000000 --- a/releasenotes/notes/template-url-19075b68d9a35a80.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added 'template_url' support to heat stack profile. diff --git a/releasenotes/notes/test-python3-train-253c0e054dd9d1e3.yaml b/releasenotes/notes/test-python3-train-253c0e054dd9d1e3.yaml deleted file mode 100644 index 49014cada..000000000 --- a/releasenotes/notes/test-python3-train-253c0e054dd9d1e3.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Add Python 3 Train unit tests. This is one of the - global goals in the Train cycle. diff --git a/releasenotes/notes/test-python3-victoria-ec16705d40a167c0.yaml b/releasenotes/notes/test-python3-victoria-ec16705d40a167c0.yaml deleted file mode 100644 index 42c7a8b62..000000000 --- a/releasenotes/notes/test-python3-victoria-ec16705d40a167c0.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - | - Add Python 3 Victoria unit tests. diff --git a/releasenotes/notes/timestamp-datatype-86c0e47debffa919.yaml b/releasenotes/notes/timestamp-datatype-86c0e47debffa919.yaml deleted file mode 100644 index 9e4d7eabc..000000000 --- a/releasenotes/notes/timestamp-datatype-86c0e47debffa919.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The data type problem related to action start time and end time is fixed. - We now use decimal type instead of float for these columns.
diff --git a/releasenotes/notes/tools-setup-d73e3298328c5355.yaml b/releasenotes/notes/tools-setup-d73e3298328c5355.yaml deleted file mode 100644 index 1367577af..000000000 --- a/releasenotes/notes/tools-setup-d73e3298328c5355.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The 'tools/setup-service' script has been fixed so that it works under - keystone v3. diff --git a/releasenotes/notes/trigger-version-af674cfe0f4693cd.yaml b/releasenotes/notes/trigger-version-af674cfe0f4693cd.yaml deleted file mode 100644 index e8e6a6d90..000000000 --- a/releasenotes/notes/trigger-version-af674cfe0f4693cd.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - The 'V' query parameter when triggering a webhook receiver is strictly - required. diff --git a/releasenotes/notes/unicode-az-ee5ea4346b36eefb.yaml b/releasenotes/notes/unicode-az-ee5ea4346b36eefb.yaml deleted file mode 100644 index 427a86368..000000000 --- a/releasenotes/notes/unicode-az-ee5ea4346b36eefb.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support to unicode availability zone names. diff --git a/releasenotes/notes/unicode-cluster-name-3bd5b6eeac2566f1.yaml b/releasenotes/notes/unicode-cluster-name-3bd5b6eeac2566f1.yaml deleted file mode 100644 index 8c36c622c..000000000 --- a/releasenotes/notes/unicode-cluster-name-3bd5b6eeac2566f1.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Added support to use Unicode string for cluster names. diff --git a/releasenotes/notes/versioned-rpc-requests-2df5d878c279e933.yaml b/releasenotes/notes/versioned-rpc-requests-2df5d878c279e933.yaml deleted file mode 100644 index 6c7c7db1b..000000000 --- a/releasenotes/notes/versioned-rpc-requests-2df5d878c279e933.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - RPC requests from the API service to the engine service are fully managed - using versioned objects now. This will enable a smooth upgrade for the - service in future. diff --git a/releasenotes/notes/vm-lock-unlock-da4c3095575c9c94.yaml b/releasenotes/notes/vm-lock-unlock-da4c3095575c9c94.yaml deleted file mode 100644 index 978218e68..000000000 --- a/releasenotes/notes/vm-lock-unlock-da4c3095575c9c94.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added support to lock and unlock a nova server node. diff --git a/releasenotes/notes/vm-migrate-6c6adee51ee8ed24.yaml b/releasenotes/notes/vm-migrate-6c6adee51ee8ed24.yaml deleted file mode 100644 index 875083096..000000000 --- a/releasenotes/notes/vm-migrate-6c6adee51ee8ed24.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added operation support to migrate a nova server node. diff --git a/releasenotes/notes/vm-pause-unpause-3e414ce4d86c7ed3.yaml b/releasenotes/notes/vm-pause-unpause-3e414ce4d86c7ed3.yaml deleted file mode 100644 index 369ced0a9..000000000 --- a/releasenotes/notes/vm-pause-unpause-3e414ce4d86c7ed3.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added operation support to pause and unpause a nova server node. diff --git a/releasenotes/notes/vm-rescue-unrescue-f56047419c50e957.yaml b/releasenotes/notes/vm-rescue-unrescue-f56047419c50e957.yaml deleted file mode 100644 index cea8d56f9..000000000 --- a/releasenotes/notes/vm-rescue-unrescue-f56047419c50e957.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added operation support to rescue and unrescue a nova server node. 
diff --git a/releasenotes/notes/vm-start-stop-e590e25a04fff1e0.yaml b/releasenotes/notes/vm-start-stop-e590e25a04fff1e0.yaml deleted file mode 100644 index 2f20fe726..000000000 --- a/releasenotes/notes/vm-start-stop-e590e25a04fff1e0.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added operation support to start and stop a nova server node. diff --git a/releasenotes/notes/vm-suspend-resume-a4398520255e6bbd.yaml b/releasenotes/notes/vm-suspend-resume-a4398520255e6bbd.yaml deleted file mode 100644 index e5219f1c1..000000000 --- a/releasenotes/notes/vm-suspend-resume-a4398520255e6bbd.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added operation support for suspending and resuming a nova server node. diff --git a/releasenotes/notes/webhook-fix-792322c0b7f374aa.yaml b/releasenotes/notes/webhook-fix-792322c0b7f374aa.yaml deleted file mode 100644 index e2d926d8d..000000000 --- a/releasenotes/notes/webhook-fix-792322c0b7f374aa.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -fixes: - - Fixed a bug where API version negotiation is not effective when invoked via - OpenStack SDK. The API impacted is limited to webhook triggering. diff --git a/releasenotes/notes/zaqar-support-470e824b7737e939.yaml b/releasenotes/notes/zaqar-support-470e824b7737e939.yaml deleted file mode 100644 index b28ebe7ab..000000000 --- a/releasenotes/notes/zaqar-support-470e824b7737e939.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Zaqar resources including "queue", "message", "subscription" and - "claim" are now supported in Senlin driver. diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst deleted file mode 100644 index d1238479b..000000000 --- a/releasenotes/source/2023.1.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -2023.1 Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/2023.1 diff --git a/releasenotes/source/2023.2.rst b/releasenotes/source/2023.2.rst deleted file mode 100644 index a4838d7d0..000000000 --- a/releasenotes/source/2023.2.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -2023.2 Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/2023.2 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 93c3196c2..000000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,259 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Senlin Release Notes documentation build configuration file, created by -# sphinx-quickstart on Tue Nov 24 17:40:50 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. 
- -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'openstackdocstheme', - 'reno.sphinxext', -] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/senlin' -openstackdocs_bug_project = 'senlin' -openstackdocs_bug_tag = '' - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -copyright = '2015, Senlin Developers' - -# Release notes are version independent. -# The short X.Y version. - -# The full version, including alpha/beta/rc tags. -release = '' -# The short X.Y version. -version = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. 
-# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'SenlinReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'SenlinReleaseNotes.tex', 'Senlin Release Notes Documentation', - 'Senlin Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'senlinreleasenotes', 'Senlin Release Notes Documentation', - ['Senlin Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'SenlinReleaseNotes', 'Senlin Release Notes Documentation', - 'Senlin Developers', 'SenlinReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 3f70bed88..000000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,37 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================== -Senlin Release Notes -==================== - -.. toctree:: - :maxdepth: 1 - - unreleased - 2023.2 - 2023.1 - zed - yoga - xena - wallaby - victoria - ussuri - train - stein - rocky - queens - pike - ocata - newton - mitaka diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po deleted file mode 100644 index 76a482805..000000000 --- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,166 +0,0 @@ -# Andi Chandler , 2018. #zanata -# Andi Chandler , 2022. #zanata -msgid "" -msgstr "" -"Project-Id-Version: senlin\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2022-06-21 05:54+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2022-08-04 12:28+0000\n" -"Last-Translator: Andi Chandler \n" -"Language-Team: English (United Kingdom)\n" -"Language: en_GB\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "1.0.0" -msgstr "1.0.0" - -msgid "10.0.0-4" -msgstr "10.0.0-4" - -msgid "11.0.0" -msgstr "11.0.0" - -msgid "2.0.0" -msgstr "2.0.0" - -msgid "2.0.0.0b1" -msgstr "2.0.0.0b1" - -msgid "2.0.0.0b2" -msgstr "2.0.0.0b2" - -msgid "2.0.0.0b3" -msgstr "2.0.0.0b3" - -msgid "2.0.0.0rc1" -msgstr "2.0.0.0rc1" - -msgid "3.0.0" -msgstr "3.0.0" - -msgid "3.0.1" -msgstr "3.0.1" - -msgid "4.0.0" -msgstr "4.0.0" - -msgid "5.0.0" -msgstr "5.0.0" - -msgid "6.0.0" -msgstr "6.0.0" - -msgid "7.0.0" -msgstr "7.0.0" - -msgid "8.0.0" -msgstr "8.0.0" - -msgid "9.0.0" -msgstr "9.0.0" - -msgid "" -"A ``action_purge`` subcommand is added to ``senlin-manage`` tool for purging " -"actions from the actions table." -msgstr "" -"A ``action_purge`` subcommand is added to ``senlin-manage`` tool for purging " -"actions from the actions table." - -msgid "" -"A cluster in the middle of an on-going action should not be deletable. The " -"engine service has been improved to detect this situation." 
-msgstr "" -"A cluster in the middle of an on-going action should not be deletable. The " -"engine service has been improved to detect this situation." - -msgid "" -"A configuration option \"exclude_derived_actions\" is introduced into the " -"\"dispatchers\" group for controlling whether derived actions should lead " -"into event notifications and/or DB records." -msgstr "" -"A configuration option \"exclude_derived_actions\" is introduced into the " -"\"dispatchers\" group for controlling whether derived actions should lead " -"into event notifications and/or DB records." - -msgid "" -"A event_purge subcommand is added to senlin-manage tool for purging events " -"generated in a specific project." -msgstr "" -"A event_purge subcommand is added to senlin-manage tool for purging events " -"generated in a specific project." - -msgid "" -"A new \"lb_status_timeout\" option is added to the LB policy to cope with " -"load-balancers that are not so responsive." -msgstr "" -"A new \"lb_status_timeout\" option is added to the LB policy to cope with " -"load-balancers that are not so responsive." - -msgid "Current Series Release Notes" -msgstr "Current Series Release Notes" - -msgid "Mitaka Series Release Notes" -msgstr "Mitaka Series Release Notes" - -msgid "Newton Series Release Notes" -msgstr "Newton Series Release Notes" - -msgid "Ocata Series Release Notes" -msgstr "Ocata Series Release Notes" - -msgid "Pike Series Release Notes" -msgstr "Pike Series Release Notes" - -msgid "Senlin Release Notes" -msgstr "Senlin Release Notes" - -msgid "" -"When referenced objects are not found in an API request, 400 is returned now." -msgstr "" -"When referenced objects are not found in an API request, 400 is returned now." - -msgid "" -"With the introduction of these new services new configuration options were " -"added to allow operators to change the number of proceses to spawn." -msgstr "" -"With the introduction of these new services, new configuration options were " -"added to allow operators to change the number of processes to spawn." - -msgid "" -"With the new 'profile-validate' API, the nova server profile now supports " -"the validation of its 'flavor', 'image' (if provided), 'availability_zone' " -"and block device driver properties." -msgstr "" -"With the new 'profile-validate' API, the Nova server profile now supports " -"the validation of its 'flavour', 'image' (if provided), 'availability_zone' " -"and block device driver properties." - -msgid "" -"With the newly added 'message' type of receivers, the 'cluster' and the " -"'action' property are not always required when creating a receiver. They are " -"still required if the receiver type is 'webhook' (the default)." -msgstr "" -"With the newly added 'message' type of receivers, the 'cluster' and the " -"'action' property are not always required when creating a receiver. They are " -"still required if the receiver type is 'webhook' (the default)." - -msgid "" -"Zaqar resources including \"queue\", \"message\", \"subscription\" and " -"\"claim\" are now supported in Senlin driver." -msgstr "" -"Zaqar resources including \"queue\", \"message\", \"subscription\" and " -"\"claim\" are now supported in Senlin driver." - -msgid "" -"[`blueprint action-update `_] A new action update API is added to allow the action " -"status to be updated. The only valid status value for update is CANCELLED." -msgstr "" -"[`blueprint action-update `_] A new action update API is added to allow the action " -"status to be updated. 
The only valid status value for the update is " -"CANCELLED." diff --git a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po deleted file mode 100644 index 2996e3b79..000000000 --- a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,63 +0,0 @@ -# Gérald LONLAS , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: senlin\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2018-08-03 04:35+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-10-22 06:38+0000\n" -"Last-Translator: Gérald LONLAS \n" -"Language-Team: French\n" -"Language: fr\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n > 1)\n" - -msgid "1.0.0" -msgstr "1.0.0" - -msgid "2.0.0" -msgstr "2.0.0" - -msgid "2.0.0.0b1" -msgstr "2.0.0.0b1" - -msgid "2.0.0.0b2" -msgstr "2.0.0.0b2" - -msgid "2.0.0.0b3" -msgstr "2.0.0.0b3" - -msgid "2.0.0.0rc1" -msgstr "2.0.0.0rc1" - -msgid "Bug Fixes" -msgstr "Corrections de bugs" - -msgid "Current Series Release Notes" -msgstr "Note de la release actuelle" - -msgid "Deprecation Notes" -msgstr "Notes dépréciées " - -msgid "Mitaka Series Release Notes" -msgstr "Note de release pour Mitaka" - -msgid "New Features" -msgstr "Nouvelles fonctionnalités" - -msgid "Newton Series Release Notes" -msgstr "Note de release pour Newton" - -msgid "Other Notes" -msgstr "Autres notes" - -msgid "Security Issues" -msgstr "Problèmes de sécurités" - -msgid "Senlin Release Notes" -msgstr "Note de release pour Senlin" - -msgid "Upgrade Notes" -msgstr "Notes de mises à jours" diff --git a/releasenotes/source/locale/zh_CN/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/zh_CN/LC_MESSAGES/releasenotes.po deleted file mode 100644 index f4c21c316..000000000 --- a/releasenotes/source/locale/zh_CN/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,212 +0,0 @@ -# Wenyan Wang , 2016. #zanata -# zzxwill , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: Senlin Release Notes\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2018-03-01 06:43+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-09-23 12:24+0000\n" -"Last-Translator: Wenyan Wang \n" -"Language-Team: Chinese (China)\n" -"Language: zh_CN\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -msgid "1.0.0" -msgstr "1.0.0" - -msgid "2.0.0.0b1" -msgstr "2.0.0.0b1" - -msgid "2.0.0.0b2" -msgstr "2.0.0.0b2" - -msgid "2.0.0.0b3" -msgstr "2.0.0.0b3" - -msgid "2.0.0.0rc1" -msgstr "2.0.0.0rc1" - -msgid "A new ``cluster_collect`` API is added." -msgstr "添加了一个新的API``cluster_collect``。" - -msgid "" -"A new policy-validate API has been added to validate the spec of a policy " -"without actually creating an instance of it." -msgstr "" -"添加了一种新的验证策略的API用来验证策略的规范而实际上不需要创建它的一个实例。" - -msgid "Action list now can be filtered by its 'status' property." -msgstr "Action列表现在可以通过它的status属性过滤。" - -msgid "Add support to update image property of a Nova server." -msgstr "为更新Nova server的镜像属性添加支持。" - -msgid "Added LBaaS health monitor support to load-balancing policy v1.0." -msgstr "负载均衡策略1.0版添加了LBaaS健康监测支持。" - -msgid "" -"Added command 'senlin-manage service clean' to clean the dead engine records." -msgstr "添加了命令“senlin-manage service clean”来清理死亡的engine记录。" - -msgid "" -"Added command 'senlin-manage service list' to show the status of engine." 
-msgstr "添加了命令“senlin-manage service list”来显示engine的状态。" - -msgid "Added configuration option for enforcing name uniqueness." -msgstr "添加了配置选项来保障名称一致性。" - -msgid "Added developer documentation for 'receiver'." -msgstr "为'receiver'添加了开发者文档。" - -msgid "" -"Added documentation for lb policy, affinity policy, scaling policy, zone " -"placement policy and region placement policy." -msgstr "" -"为lb policy, affinity policy, scaling policy, zone placement policy和region " -"placement policy添加了文档。" - -msgid "Added documentation for senlin.policy.deletion-v1.0." -msgstr "为senlin.policy.deletion-v1.0添加了文档。" - -msgid "Added new APIs for cluster/node check and recover." -msgstr "为集群和节点的检查和恢复添加了新的API。" - -msgid "Added parameter checking for cluster-policy-detach API invocation." -msgstr "为API cluster-policy-detach的调用添加了参数检查。" - -msgid "Added parameter checking for cluster-policy-update API invocation." -msgstr "为API cluster-policy-update的调用添加了参数检查。" - -msgid "Added parameter checking for policy-create API calls." -msgstr "为API policy-create的调用添加了参数检查。" - -msgid "Added parameter sanitization for cluster-policy-attach." -msgstr "为cluster-policy-attach添加了参数过滤。" - -msgid "Added profile property checking regarding whether they are updatable." -msgstr "添加了样版属性检查,不论样版是否可更新。" - -msgid "" -"Added senlin.policy.affinity-v1.0 which can be used to control how VM " -"servers are placed based on nova servergroup settings." -msgstr "" -"添加了senlin.policy.affinity-v1.0,它可以被用来控制基于nova servergroup设置的" -"虚拟机服务器如何被place。" - -msgid "Added support of multi-tenancy for actions." -msgstr "为action添加了多租户支持。" - -msgid "Added support to limit number of clusters per project." -msgstr "为限定每个项目的集群个数添加了支持。" - -msgid "" -"Added support to multi-tenancy (aka. project_safe checking) when finding " -"resources." -msgstr "当查找资源时,添加了多租户支持,也就是project_safe检查。" - -msgid "Added support to multi-tenancy for event resources." -msgstr "为事件资源添加了多租户支持。" - -msgid "" -"Added support to oslo.versionedobject so that DB interactions are " -"abstracted. It is possible to do live upgrade for senlin service now." -msgstr "" -"为oslo.versionedobject添加了支持,这样抽象了数据库交互,senlin服务的热升级成" -"为了可能。" - -msgid "Added support to updating network properties of a nova server." -msgstr "为更新nova服务器的网络属性添加了支持。" - -msgid "Added user documentation for 'receiver'." -msgstr "为‘receiver’添加了用户文档。" - -msgid "" -"Both image ID and image name are supported when creating os.nova.server " -"profile." -msgstr "当创建os.nova.server样版时,镜像ID和名称都是支持的。" - -msgid "Bug Fixes" -msgstr "Bug修复" - -msgid "" -"Clusters now have a new 'RESIZING' status when its scale is being changed." -msgstr "当集群的规模正在被改变时,它现在有一个新状态'RESIZING'。" - -msgid "Command `senlin-manage purge_deleted` is removed." -msgstr "命令`senlin-manage purge_deleted`被移除了。" - -msgid "Current Series Release Notes" -msgstr "当前版本发布说明" - -msgid "" -"DB columns obj_id, obj_type and obj_name in the event table are now renamed " -"to oid, otype and oname correspondingly." -msgstr "" -"在事件表中的DB列obj_id, obj_type以及obj_name目前被重新改名为 oid, otype以及 " -"oname 。" - -msgid "Deprecation Notes" -msgstr "弃用说明" - -msgid "Enabled update to the 'flavor' of a nova server profile." -msgstr "允许nova服务器的样版的‘flavor’属性的更新操作。" - -msgid "Enabled update to the 'name' of a nova server profile." -msgstr "允许nova服务器的样版的‘name’属性的更新操作。" - -msgid "Engine scheduler was redesigned to work in \"tickless\" way." 
-msgstr "引擎调度器被重新设计为以\"tickless\"方式工作。" - -msgid "" -"Ensure there are no underscores ('_') in resource names exposed through " -"RESTful API" -msgstr "请确保通过RESTful API暴漏的资源名称中没有下划线('_') 。" - -msgid "Event list can now be filtered by its 'level' property." -msgstr "事件列表现在可以通过'level'属性过滤。" - -msgid "Mitaka Series Release Notes" -msgstr "Mitaka版本发布说明" - -msgid "New Features" -msgstr "新特性" - -msgid "Other Notes" -msgstr "其他说明" - -msgid "Removed documentation for 'webhook'." -msgstr "移除了'webhook'的文档。" - -msgid "Security Issues" -msgstr "安全问题" - -msgid "Senlin API documentation merged into api-site and published." -msgstr "Senlin API文档合并到api站点并发布。" - -msgid "" -"Senlin API has removed 'tenant_id' from its endpoint. This means users have " -"to recreate their keystone endpoints if they have an old installation." -msgstr "" -"Senlin API从它的端点中删除了'tenant_id'。这意味着用户如果已经有了一个老的安" -"装,则必须重新创建他们的keystone端点。" - -msgid "" -"Senlin API/Engine configuration options are now documented and published " -"online." -msgstr "Senlin API/引擎配置选项现在已经文档话并且在线发布了。" - -msgid "Senlin Release Notes" -msgstr "Senlin发布说明" - -msgid "Status `DELETED` is removed from clusters and nodes." -msgstr "状态`DELETED`从集群和节点中删除了。" - -msgid "Supporting engine status check, with senlin-manage command." -msgstr "使用senlin管理命令,支持引擎状态检查。" - -msgid "Upgrade Notes" -msgstr "升级说明" diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index 05ed3e48e..000000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Mitaka Series Release Notes -=========================== - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index a737f9aef..000000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Newton Series Release Notes -=========================== - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index 5c437c8de..000000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Ocata Series Release Notes -========================== - -.. release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst deleted file mode 100644 index 9184412d3..000000000 --- a/releasenotes/source/pike.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================= -Pike Series Release Notes -========================= - -.. release-notes:: - :branch: stable/pike diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst deleted file mode 100644 index 3e1243834..000000000 --- a/releasenotes/source/queens.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Queens Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/queens diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst deleted file mode 100644 index 40dd517b7..000000000 --- a/releasenotes/source/rocky.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Rocky Series Release Notes -=================================== - -.. 
release-notes:: - :branch: stable/rocky diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst deleted file mode 100644 index efaceb667..000000000 --- a/releasenotes/source/stein.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Stein Series Release Notes -=================================== - -.. release-notes:: - :branch: stable/stein diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst deleted file mode 100644 index 583900393..000000000 --- a/releasenotes/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================== -Train Series Release Notes -========================== - -.. release-notes:: - :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index 875030f9d..000000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================ -Current Series Release Notes -============================ - -.. release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst deleted file mode 100644 index e21e50e0c..000000000 --- a/releasenotes/source/ussuri.rst +++ /dev/null @@ -1,6 +0,0 @@ -=========================== -Ussuri Series Release Notes -=========================== - -.. release-notes:: - :branch: stable/ussuri diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst deleted file mode 100644 index 4efc7b6f3..000000000 --- a/releasenotes/source/victoria.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= -Victoria Series Release Notes -============================= - -.. release-notes:: - :branch: stable/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst deleted file mode 100644 index d77b56599..000000000 --- a/releasenotes/source/wallaby.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================ -Wallaby Series Release Notes -============================ - -.. release-notes:: - :branch: stable/wallaby diff --git a/releasenotes/source/xena.rst b/releasenotes/source/xena.rst deleted file mode 100644 index 1be85be3e..000000000 --- a/releasenotes/source/xena.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================= -Xena Series Release Notes -========================= - -.. release-notes:: - :branch: stable/xena diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst deleted file mode 100644 index 43cafdea8..000000000 --- a/releasenotes/source/yoga.rst +++ /dev/null @@ -1,6 +0,0 @@ -========================= -Yoga Series Release Notes -========================= - -.. release-notes:: - :branch: unmaintained/yoga diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst deleted file mode 100644 index 9608c05e4..000000000 --- a/releasenotes/source/zed.rst +++ /dev/null @@ -1,6 +0,0 @@ -======================== -Zed Series Release Notes -======================== - -.. release-notes:: - :branch: stable/zed diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index bc93e0d5b..000000000 --- a/requirements.txt +++ /dev/null @@ -1,37 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
-alembic>=1.6.5 # MIT -pbr>=3.1.1 # Apache-2.0 -docker>=2.4.2 # Apache-2.0 -eventlet>=0.26.1 # MIT -jsonpath-rw>=1.4.0 # Apache-2.0 -jsonschema>=3.2.0 # MIT -keystoneauth1>=3.18.0 # Apache-2.0 -keystonemiddleware>=4.17.0 # Apache-2.0 -microversion-parse>=0.2.1 # Apache-2.0 -openstacksdk>=0.99.0 # Apache-2.0 -oslo.config>=6.8.0 # Apache-2.0 -oslo.context>=2.22.0 # Apache-2.0 -oslo.db>=6.0.0 # Apache-2.0 -oslo.i18n>=3.20.0 # Apache-2.0 -oslo.log>=3.36.0 # Apache-2.0 -oslo.reports>=1.18.0 # Apache-2.0 -oslo.messaging>=14.1.0 # Apache-2.0 -oslo.middleware>=3.31.0 # Apache-2.0 -oslo.policy>=3.6.0 # Apache-2.0 -oslo.serialization>=2.25.0 # Apache-2.0 -oslo.service>=1.31.0 # Apache-2.0 -oslo.upgradecheck>=1.3.0 # Apache-2.0 -oslo.utils>=4.5.0 # Apache-2.0 -oslo.versionedobjects>=1.31.2 # Apache-2.0 -osprofiler>=2.3.0 # Apache-2.0 -PasteDeploy>=1.5.0 # MIT -pytz>=2015.7 # MIT -PyYAML>=5.1 # MIT -requests>=2.20.0 # Apache-2.0 -Routes>=2.3.1 # MIT -SQLAlchemy>=1.4.41 # MIT -stevedore>=1.20.0 # Apache-2.0 -tenacity>=6.0.0 # Apache-2.0 -WebOb>=1.7.1 # MIT diff --git a/senlin/__init__.py b/senlin/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/api/__init__.py b/senlin/api/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/api/common/__init__.py b/senlin/api/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/api/common/serializers.py b/senlin/api/common/serializers.py deleted file mode 100644 index a3189c9a3..000000000 --- a/senlin/api/common/serializers.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility methods for serializing responses -""" - -import datetime - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import webob - -from senlin.common import exception -from senlin.common.i18n import _ - -LOG = logging.getLogger(__name__) - - -def is_json_content_type(request): - - content_type = request.content_type - if not content_type or content_type.startswith('text/plain'): - content_type = 'application/json' - - if (content_type in ('JSON', 'application/json') and - request.body.startswith(b'{')): - return True - return False - - -class JSONRequestDeserializer(object): - - def has_body(self, request): - """Return whether a Webob.Request object will possess an entity body. - - :param request: A Webob.Request object - """ - if request is None or request.content_length is None: - return False - - if request.content_length > 0 and is_json_content_type(request): - return True - - return False - - def from_json(self, datastring): - try: - if len(datastring) > cfg.CONF.senlin_api.max_json_body_size: - msg = _('JSON body size (%(len)s bytes) exceeds maximum ' - 'allowed size (%(limit)s bytes).' 
- ) % {'len': len(datastring), - 'limit': cfg.CONF.senlin_api.max_json_body_size} - raise exception.RequestLimitExceeded(message=msg) - return jsonutils.loads(datastring) - except ValueError as ex: - raise webob.exc.HTTPBadRequest(str(ex)) - - def default(self, request): - if self.has_body(request): - return {'body': self.from_json(request.body)} - else: - return {} - - -class JSONResponseSerializer(object): - - def to_json(self, data): - def sanitizer(obj): - if isinstance(obj, datetime.datetime): - return obj.isoformat() - return str(obj) - - response = jsonutils.dumps(data, default=sanitizer, sort_keys=True) - LOG.debug("JSON response : %s", response) - return response - - def default(self, response, result): - response.content_type = 'application/json' - response.body = encodeutils.safe_encode(self.to_json(result)) diff --git a/senlin/api/common/util.py b/senlin/api/common/util.py deleted file mode 100644 index f9f61f444..000000000 --- a/senlin/api/common/util.py +++ /dev/null @@ -1,118 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools - -import jsonschema -from oslo_utils import strutils -from webob import exc - -from senlin.common.i18n import _ -from senlin.common import policy -from senlin.objects import base as obj_base - - -def policy_enforce(handler): - """Decorator that enforces policies. - - Check the path matches the request context and enforce policy defined in - policy file and policies in code. - - This is a handler method decorator. - """ - @functools.wraps(handler) - def policy_checker(controller, req, **kwargs): - # Enable project_id based target check - rule = "%s:%s" % (controller.REQUEST_SCOPE, - handler.__name__) - allowed = policy.enforce(context=req.context, rule=rule, target={}) - if not allowed: - raise exc.HTTPForbidden() - return handler(controller, req, **kwargs) - - return policy_checker - - -def parse_request(name, req, body, key=None): - """Formalize an API request and validate it. - - :param name: The name for a versioned request object. - :param req: Reference to a WSGI request object. - :param body: The JSON body (if any) that accompanies a request. Could be - augmented by controller before getting passed here. - :param key: An optional key indicating the inner object for a request. 
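The JSONResponseSerializer above relies on a ``default`` hook to make non-JSON types serializable before encoding. A minimal standalone sketch of the same sanitizer pattern, using the stdlib json module in place of oslo_serialization (an assumption made here so the snippet runs anywhere):

    import datetime
    import json

    def sanitizer(obj):
        # Datetimes become ISO 8601 strings; anything else falls back to
        # str(), mirroring JSONResponseSerializer.to_json() above.
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        return str(obj)

    data = {'name': 'c1', 'created_at': datetime.datetime(2024, 5, 10, 14, 29)}
    print(json.dumps(data, default=sanitizer, sort_keys=True))
    # {"created_at": "2024-05-10T14:29:00", "name": "c1"}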
- :returns: A validated, versioned request object - """ - try: - req_cls = obj_base.SenlinObject.obj_class_from_name(name) - except Exception as ex: - raise exc.HTTPBadRequest(str(ex)) - - try: - primitive = req_cls.normalize_req(name, body, key) - except ValueError as ex: - raise exc.HTTPBadRequest(str(ex)) - - version = req_cls.find_version(req.context) - obj = None - try: - obj = req_cls.obj_from_primitive(primitive) - jsonschema.validate(primitive, obj.to_json_schema()) - except ValueError as ex: - raise exc.HTTPBadRequest(str(ex)) - except jsonschema.exceptions.ValidationError as ex: - raise exc.HTTPBadRequest(str(ex.message)) - - # Do version conversion if necessary - if obj is not None and version != req_cls.VERSION: - obj.obj_make_compatible(primitive, version) - return req_cls.obj_from_primitive(primitive) - - return obj - - -def get_allowed_params(params, whitelist): - """Extract from ``params`` all entries listed in ``whitelist``. - - The returned dict will contain an entry for a key if, and only if, - there's an entry in ``whitelist`` for that key and at least one entry in - ``params``. If ``params`` contains multiple entries for the same key, it - will yield an array of values: ``{key: [v1, v2,...]}`` - - :param params: a NestedMultiDict from webob.Request.params - :param whitelist: a dict mapping each allowed key to its value type, - one of 'single', 'multi' or 'mixed' - - :returns: a dict with {key: value} pairs - """ - allowed_params = {} - - for key, get_type in whitelist.items(): - value = None - if get_type == 'single': - value = params.get(key) - elif get_type in ('mixed', 'multi'): - value = params.getall(key) - - if value: - allowed_params[key] = value - - return allowed_params - - -def parse_bool_param(name, value): - if str(value).lower() not in ('true', 'false'): - msg = _("Invalid value '%(value)s' specified for '%(name)s'" - ) % {'name': name, 'value': value} - raise exc.HTTPBadRequest(msg) - - return strutils.bool_from_string(value, strict=True) diff --git a/senlin/api/common/version_request.py b/senlin/api/common/version_request.py deleted file mode 100644 index 6ebf9fdd6..000000000 --- a/senlin/api/common/version_request.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -from senlin.common import exception -from senlin.common.i18n import _ - - -class APIVersionRequest(object): - """An API Version Request object.""" - - def __init__(self, version_string=None): - """Initialize an APIVersionRequest object. - - :param version_string: String representation of APIVersionRequest. - Correct format is 'X.Y', where 'X' and 'Y' are int values. - None value should be used to create Null APIVersionRequest, - which is equal to '0.0'.
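To illustrate the whitelist filtering implemented by get_allowed_params() above, here is a rough, self-contained sketch; FakeParams is a stand-in for webob's NestedMultiDict (an assumption made so the snippet has no dependencies), and the filtering logic is restated from the helper:

    class FakeParams(object):
        # Stand-in for webob.Request.params; assumed shape: a dict
        # mapping each key to the list of submitted values.
        def __init__(self, data):
            self.data = data

        def get(self, key):
            values = self.data.get(key)
            return values[0] if values else None

        def getall(self, key):
            return self.data.get(key, [])

    def get_allowed_params(params, whitelist):
        # Same logic as the deleted helper: keep a key only if it is
        # whitelisted and at least one value was supplied.
        allowed = {}
        for key, get_type in whitelist.items():
            if get_type == 'single':
                value = params.get(key)
            else:  # 'multi' or 'mixed'
                value = params.getall(key)
            if value:
                allowed[key] = value
        return allowed

    params = FakeParams({'name': ['c1'], 'status': ['ACTIVE', 'ERROR'],
                         'bogus': ['x']})
    print(get_allowed_params(params, {'name': 'single', 'status': 'multi'}))
    # {'name': 'c1', 'status': ['ACTIVE', 'ERROR']} -- 'bogus' is dropped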
- """ - self.major = 0 - self.minor = 0 - - if version_string is not None: - match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string) - if match: - self.major = int(match.group(1)) - self.minor = int(match.group(2)) - else: - raise exception.InvalidAPIVersionString(version=version_string) - - def __str__(self): - return "%s.%s" % (self.major, self.minor) - - def is_null(self): - return self.major == 0 and self.minor == 0 - - def _type_error(self, other): - return TypeError(_("'%(other)s' must be an instance of '%(cls)s'") % - {"other": other, "cls": self.__class__}) - - def __lt__(self, other): - if not isinstance(other, APIVersionRequest): - raise self._type_error(other) - - return ((self.major, self.minor) < (other.major, other.minor)) - - def __eq__(self, other): - if not isinstance(other, APIVersionRequest): - raise self._type_error(other) - - return ((self.major, self.minor) == (other.major, other.minor)) - - def __gt__(self, other): - if not isinstance(other, APIVersionRequest): - raise self._type_error(other) - - return ((self.major, self.minor) > (other.major, other.minor)) - - def __le__(self, other): - return self < other or self == other - - def __ne__(self, other): - return not self.__eq__(other) - - def __ge__(self, other): - return self > other or self == other - - def matches(self, min_version, max_version): - """Check this object matches the specified min and/or max. - - This function checks if this version >= the provided min_version - and this version <= the provided max_version. - - :param min_version: Minimum acceptable version. There is no minimum - limit if this is null. - :param max_version: Maximum acceptable version. There is no maximum - limit if this is null. - :returns: A boolean indicating whether the version matches. - :raises: ValueError if self is null. - """ - if self.is_null(): - raise ValueError - if max_version.is_null() and min_version.is_null(): - return True - elif max_version.is_null(): - return min_version <= self - elif min_version.is_null(): - return self <= max_version - else: - return min_version <= self <= max_version diff --git a/senlin/api/common/versioned_method.py b/senlin/api/common/versioned_method.py deleted file mode 100644 index 10a0bcb6d..000000000 --- a/senlin/api/common/versioned_method.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - class VersionedMethod(object): - - def __init__(self, name, min_version, max_version, func): - """Versioning information for a single method - - Minimums and maximums are inclusive - :param name: Name of the method - :param min_version: Minimum acceptable version - :param max_version: Maximum acceptable version - :param func: Method to call - - """ - self.name = name - self.min_version = min_version - self.max_version = max_version - self.func = func - - def __str__(self): - return ("Version Method %(name)s: min: %(min)s, max: %(max)s" % - {"name": self.name, "min": self.min_version, - "max": self.max_version}) diff --git a/senlin/api/common/wsgi.py b/senlin/api/common/wsgi.py deleted file mode 100644 index 237f93470..000000000 --- a/senlin/api/common/wsgi.py +++ /dev/null @@ -1,943 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utility methods for working with WSGI servers -""" - -import abc -import errno -import os -import signal -import sys -import time - -import eventlet -from eventlet.green import socket -from eventlet.green import ssl -import eventlet.wsgi -import functools -from oslo_config import cfg -import oslo_i18n -from oslo_log import log as logging -from oslo_utils import importutils -from paste import deploy -from routes import middleware -import webob -from webob import dec as webob_dec -from webob import exc - -from senlin.api.common import serializers -from senlin.api.common import version_request -from senlin.api.common import versioned_method -from senlin.common import exception -from senlin.common.i18n import _ -from senlin.rpc import client as rpc_client - - -LOG = logging.getLogger(__name__) -URL_LENGTH_LIMIT = 50000 -DEFAULT_API_VERSION = '1.0' -API_VERSION_KEY = 'OpenStack-API-Version' -VER_METHOD_ATTR = 'versioned_methods' - - -def get_bind_addr(conf, default_port=None): - return conf.bind_host, conf.bind_port or default_port - - -def get_socket(conf, default_port): - """Bind socket to bind ip:port in conf - - :param conf: a cfg.ConfigOpts object - :param default_port: port to bind to if none is specified in conf - - :returns: a socket object as returned from socket.listen or - ssl.wrap_socket if conf specifies cert_file - """ - - bind_addr = get_bind_addr(conf, default_port) - - # TODO(jaypipes): eventlet's greened socket module does not actually - # support IPv6 in getaddrinfo().
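VersionedMethod above is just a record tying an inclusive [min, max] version range to a callable. A sketch of how such records can be kept sorted newest-first and selected at dispatch time, with versions as plain (major, minor) tuples rather than APIVersionRequest objects (an assumption for brevity):

    class VersionedMethod(object):
        # Restatement of the record type above.
        def __init__(self, name, min_version, max_version, func):
            self.name = name
            self.min_version = min_version
            self.max_version = max_version
            self.func = func

    def select(methods, requested):
        # Sorted by min_version, newest first, so the first range that
        # contains the request is the most recent implementation.
        for m in sorted(methods, key=lambda m: m.min_version, reverse=True):
            if m.min_version <= requested <= m.max_version:
                return m.func
        raise LookupError('no method supports version %s.%s' % requested)

    methods = [
        VersionedMethod('index', (1, 0), (1, 3), lambda: 'legacy listing'),
        VersionedMethod('index', (1, 4), (1, 9), lambda: 'current listing'),
    ]
    print(select(methods, (1, 5))())  # current listing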
We need to get around this in the - # future or monitor upstream for a fix - address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0], - bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) - if addr[0] in (socket.AF_INET, socket.AF_INET6)][0] - - cert_file = conf.cert_file - key_file = conf.key_file - use_ssl = cert_file or key_file - if use_ssl and (not cert_file or not key_file): - raise RuntimeError(_("When running server in SSL mode, you must " - "specify both a cert_file and key_file " - "option value in your configuration file")) - - sock = None - retry_until = time.time() + 30 - while not sock and time.time() < retry_until: - try: - sock = eventlet.listen(bind_addr, backlog=conf.backlog, - family=address_family) - except socket.error as err: - if err.args[0] != errno.EADDRINUSE: - raise - eventlet.sleep(0.1) - - if not sock: - raise RuntimeError(_("Could not bind to %(bind_addr)s after trying" - " 30 seconds") % {'bind_addr': bind_addr}) - return sock - - -class Server(object): - """Server class to manage multiple WSGI sockets and applications.""" - - def __init__(self, name, conf, threads=1000): - os.umask(0o27) # ensure files are created with the correct privileges - self._logger = logging.getLogger("eventlet.wsgi.server") - self.name = name - self.threads = threads - self.children = set() - self.stale_children = set() - self.running = True - self.pgid = os.getpid() - self.conf = conf - try: - os.setpgid(self.pgid, self.pgid) - except OSError: - self.pgid = 0 - - def kill_children(self, *args): - """Kill the entire process group.""" - - LOG.error('SIGTERM received') - signal.signal(signal.SIGTERM, signal.SIG_IGN) - signal.signal(signal.SIGINT, signal.SIG_IGN) - self.running = False - os.killpg(0, signal.SIGTERM) - - def hup(self, *args): - """Reload configuration files with zero down time.""" - - LOG.error('SIGHUP received') - signal.signal(signal.SIGHUP, signal.SIG_IGN) - raise exception.SIGHUPInterrupt - - def start(self, application, default_port): - """Run a WSGI server with the given application. - - :param application: The application to run in the WSGI server - :param default_port: Port to bind to if none is specified in conf - """ - - eventlet.wsgi.MAX_HEADER_LINE = self.conf.max_header_line - self.application = application - self.default_port = default_port - self.configure_socket() - self.start_wsgi() - - def start_wsgi(self): - if self.conf.workers == 0: - # Useful for profiling, test, debug etc. - self.pool = eventlet.GreenPool(size=self.threads) - self.pool.spawn_n(self._single_run, self.application, self.sock) - return - - LOG.info("Starting %d workers", self.conf.workers) - signal.signal(signal.SIGTERM, self.kill_children) - signal.signal(signal.SIGINT, self.kill_children) - signal.signal(signal.SIGHUP, self.hup) - while len(self.children) < self.conf.workers: - self.run_child() - - def wait_on_children(self): - """Wait on children exit.""" - - while self.running: - try: - pid, status = os.wait() - if os.WIFEXITED(status) or os.WIFSIGNALED(status): - self._remove_children(pid) - self._verify_and_respawn_children(pid, status) - except OSError as err: - if err.errno not in (errno.EINTR, errno.ECHILD): - raise - except KeyboardInterrupt: - LOG.info('Caught keyboard interrupt. 
Exiting.') - os.killpg(0, signal.SIGTERM) - break - except exception.SIGHUPInterrupt: - self.reload() - continue - - eventlet.greenio.shutdown_safe(self.sock) - self.sock.close() - LOG.debug('Exited') - - def configure_socket(self, old_conf=None, has_changed=None): - """Ensure a socket exists and is appropriately configured. - - This function is called on start up, and can also be - called in the event of a configuration reload. - - When called for the first time a new socket is created. - If reloading and either bind_host or bind_port have been - changed, the existing socket must be closed and a new - socket opened (laws of physics). - - In all other cases (bind_host/bind_port have not been changed) - the existing socket is reused. - - :param old_conf: Cached old configuration settings (if any) - :param has_changed: callable to determine if a parameter has changed - """ - - new_sock = (old_conf is None or ( - has_changed('bind_host') or - has_changed('bind_port'))) - # check https - use_ssl = not (not self.conf.cert_file or not self.conf.key_file) - # Were we using https before? - old_use_ssl = (old_conf is not None and not ( - not old_conf.get('key_file') or - not old_conf.get('cert_file'))) - # Do we now need to perform an SSL wrap on the socket? - wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock) - # Do we now need to perform an SSL unwrap on the socket? - unwrap_sock = use_ssl is False and old_use_ssl is True - - if new_sock: - self._sock = None - if old_conf is not None: - self.sock.close() - - _sock = get_socket(self.conf, self.default_port) - _sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - # sockets can hang around forever without keepalive - _sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - self._sock = _sock - - if wrap_sock: - self.sock = ssl.wrap_socket(self._sock, - certfile=self.conf.cert_file, - keyfile=self.conf.key_file) - - if unwrap_sock: - self.sock = self._sock - - if new_sock and not use_ssl: - self.sock = self._sock - - # Pick up newly deployed certs - if old_conf is not None and use_ssl is True and old_use_ssl is True: - if has_changed('cert_file'): - self.sock.certfile = self.conf.cert_file - if has_changed('key_file'): - self.sock.keyfile = self.conf.key_file - - if new_sock or (old_conf is not None and has_changed('tcp_keepidle')): - # This option isn't available in the OS X version of eventlet - if hasattr(socket, 'TCP_KEEPIDLE'): - self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, - self.conf.tcp_keepidle) - - if old_conf is not None and has_changed('backlog'): - self.sock.listen(self.conf.backlog) - - def _remove_children(self, pid): - - if pid in self.children: - self.children.remove(pid) - LOG.info('Removed dead child %s', pid) - elif pid in self.stale_children: - self.stale_children.remove(pid) - LOG.info('Removed stale child %s', pid) - else: - LOG.warning('Unrecognized child %s', pid) - - def _verify_and_respawn_children(self, pid, status): - if len(self.stale_children) == 0: - LOG.debug('No stale children') - - if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: - LOG.error('Not respawning child %d, cannot ' - 'recover from termination', pid) - if not self.children and not self.stale_children: - LOG.info('All workers have terminated. Exiting') - self.running = False - else: - if len(self.children) < self.conf.workers: - self.run_child() - - def stash_conf_values(self): - """Make a copy of some of the current global CONF's settings. 
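The configure_socket() method above derives three decisions from the old versus new settings: whether a fresh socket is needed, and whether TLS must be wrapped onto or unwrapped from it. A pure-function restatement of just that decision logic, with configurations as plain dicts (an assumption; the real code reads cfg.ConfigOpts attributes):

    def socket_actions(old_conf, new_conf):
        # old_conf is None on first start; otherwise it is the stashed copy.
        def changed(param):
            return old_conf is None or old_conf.get(param) != new_conf.get(param)

        new_sock = old_conf is None or changed('bind_host') or changed('bind_port')
        use_ssl = bool(new_conf.get('cert_file') and new_conf.get('key_file'))
        old_ssl = bool(old_conf and old_conf.get('cert_file')
                       and old_conf.get('key_file'))
        return {
            'new_sock': new_sock,                                # (re)bind
            'wrap_sock': use_ssl and (not old_ssl or new_sock),  # add TLS
            'unwrap_sock': not use_ssl and old_ssl,              # drop TLS
        }

    print(socket_actions(None, {'bind_host': '0.0.0.0', 'bind_port': 8777}))
    # {'new_sock': True, 'wrap_sock': False, 'unwrap_sock': False}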
- - Allow determining if any of these values have changed - when the config is reloaded. - """ - conf = {} - conf['bind_host'] = self.conf.bind_host - conf['bind_port'] = self.conf.bind_port - conf['backlog'] = self.conf.backlog - conf['key_file'] = self.conf.key_file - conf['cert_file'] = self.conf.cert_file - return conf - - def reload(self): - """Reload and re-apply configuration settings. - - Existing child processes are sent a SIGHUP signal and will exit after - completing existing requests. New child processes, which will have the - updated configuration, are spawned. This allows preventing - interruption to the service. - """ - def _has_changed(old, new, param): - old = old.get(param) - new = getattr(new, param) - return (new != old) - - old_conf = self.stash_conf_values() - has_changed = functools.partial(_has_changed, old_conf, self.conf) - cfg.CONF.reload_config_files() - os.killpg(self.pgid, signal.SIGHUP) - self.stale_children = self.children - self.children = set() - - # Ensure any logging config changes are picked up - logging.setup(cfg.CONF, self.name) - - self.configure_socket(old_conf, has_changed) - self.start_wsgi() - - def wait(self): - """Wait until all servers have completed running.""" - try: - if self.children: - self.wait_on_children() - else: - self.pool.waitall() - except KeyboardInterrupt: - pass - - def run_child(self): - def child_hup(*args): - """Shut down child processes, existing requests are handled.""" - signal.signal(signal.SIGHUP, signal.SIG_IGN) - eventlet.wsgi.is_accepting = False - self.sock.close() - - pid = os.fork() - if pid == 0: - signal.signal(signal.SIGHUP, child_hup) - signal.signal(signal.SIGTERM, signal.SIG_DFL) - # ignore the interrupt signal to avoid a race whereby - # a child worker receives the signal before the parent - # and is respawned unnecessarily as a result - signal.signal(signal.SIGINT, signal.SIG_IGN) - # The child has no need to stash the unwrapped - # socket, and the reference prevents a clean - # exit on sighup - self._sock = None - self.run_server() - LOG.info('Child %d exiting normally', os.getpid()) - # self.pool.waitall() is now called in wsgi's server so - # it's safe to exit here - sys.exit(0) - else: - LOG.info('Started child %s', pid) - self.children.add(pid) - - def run_server(self): - """Run a WSGI server.""" - - eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0" - eventlet.hubs.use_hub('poll') - eventlet.patcher.monkey_patch(all=False, socket=True) - self.pool = eventlet.GreenPool(size=self.threads) - socket_timeout = cfg.CONF.senlin_api.client_socket_timeout or None - - try: - eventlet.wsgi.server( - self.sock, self.application, - custom_pool=self.pool, - url_length_limit=URL_LENGTH_LIMIT, - log=self._logger, - debug=cfg.CONF.debug, - keepalive=cfg.CONF.senlin_api.wsgi_keep_alive, - socket_timeout=socket_timeout) - except socket.error as err: - if err.errno != errno.EINVAL: - raise - - self.pool.waitall() - - def _single_run(self, application, sock): - """Start a WSGI server in a new green thread.""" - - LOG.info("Starting single process server") - eventlet.wsgi.server(sock, application, custom_pool=self.pool, - url_length_limit=URL_LENGTH_LIMIT, - log=self._logger, debug=cfg.CONF.debug) - - -class Middleware(object): - """Base WSGI middleware wrapper. - - These classes require an application to be initialized that will be called - next. By default the middleware will simply call its wrapped app, or you - can override __call__ to customize its behavior. 
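The reload() method above compares stashed settings against the live configuration through a functools.partial closure. A tiny sketch of that pattern, again with plain dicts standing in for the ConfigOpts object (the real _has_changed reads the live value with getattr):

    import functools

    def _has_changed(old, new, param):
        # reload() binds the old and new configs once, then asks about
        # individual parameters by name.
        return old.get(param) != new.get(param)

    old_conf = {'bind_host': '0.0.0.0', 'bind_port': 8777}
    live_conf = {'bind_host': '0.0.0.0', 'bind_port': 8778}

    has_changed = functools.partial(_has_changed, old_conf, live_conf)
    print(has_changed('bind_host'), has_changed('bind_port'))  # False True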
- """ - - def __init__(self, application): - self.application = application - - def process_request(self, request): - """Called on each request. - - If this returns None, the next application down the stack will be - executed. If it returns a response then that response will be returned - and execution will stop here. - - :param request: A request object to be processed. - :returns: None. - """ - - return None - - def process_response(self, response): - """Customize the response.""" - return response - - @webob_dec.wsgify - def __call__(self, request): - response = self.process_request(request) - if response: - return response - response = request.get_response(self.application) - return self.process_response(response) - - -class Debug(Middleware): - """Helper class that can be inserted into any WSGI application chain.""" - - @webob_dec.wsgify - def __call__(self, req): - print(("*" * 40) + " REQUEST ENVIRON") - for key, value in req.environ.items(): - print(key, "=", value) - print('') - resp = req.get_response(self.application) - - print(("*" * 40) + " RESPONSE HEADERS") - for (key, value) in resp.headers.items(): - print(key, "=", value) - print('') - - resp.app_iter = self.print_generator(resp.app_iter) - - return resp - - @staticmethod - def print_generator(app_iter): - # Iterator that prints the contents of a wrapper string iterator - # when iterated. - print(("*" * 40) + " BODY") - for part in app_iter: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print('') - - -def debug_filter(app, conf, **local_conf): - return Debug(app) - - -class Router(object): - """WSGI middleware that maps incoming requests to WSGI apps.""" - - def __init__(self, mapper): - """Create a router for the given routes.Mapper.""" - - self.map = mapper - self._router = middleware.RoutesMiddleware(self._dispatch, self.map) - - @webob_dec.wsgify - def __call__(self, req): - """Route the incoming request to a controller based on self.map.""" - - return self._router - - @staticmethod - @webob_dec.wsgify - def _dispatch(req): - """Private dispatch method. - - Called by self._router() after matching the incoming request to - a route and putting the information into req.environ. - :returns: Either returns 404 or the routed WSGI app's response. - """ - - match = req.environ['wsgiorg.routing_args'][1] - if not match: - return exc.HTTPNotFound() - app = match['controller'] - return app - - -class Request(webob.Request): - """Add some OpenStack API-specific logics to the base webob.Request.""" - - def best_match_content_type(self): - """Determine the requested response content-type.""" - supported = ('application/json',) - bm = self.accept.best_match(supported) - return bm or 'application/json' - - def get_content_type(self, allowed_content_types): - """Determine content type of the request body.""" - if "Content-Type" not in self.headers: - raise exception.InvalidContentType(content_type=None) - - content_type = self.content_type - - if content_type not in allowed_content_types: - raise exception.InvalidContentType(content_type=content_type) - else: - return content_type - - def best_match_language(self): - """Determine best available locale from the Accept-Language header. - - :returns: the best language match or None if the 'Accept-Language' - header was not available in the request. 
- """ - if not self.accept_language: - return None - all_languages = oslo_i18n.get_available_languages('senlin') - return self.accept_language.best_match(all_languages) - - -class Resource(object): - """WSGI app that handles (de)serialization and controller dispatch. - - Read routing information supplied by RoutesMiddleware and call - the requested action method upon its deserializer, controller, - and serializer. Those three objects may implement any of the basic - controller action methods (create, update, show, index, delete) - along with any that may be specified in the api router. A 'default' - method may also be implemented to be used in place of any - non-implemented actions. Deserializer methods must accept a request - argument and return a dictionary. Controller methods must accept a - request argument. Additionally, they must also accept keyword - arguments that represent the keys returned by the Deserializer. They - may raise a webob.exc exception or return a dict, which will be - serialized by requested content type. - """ - - def __init__(self, controller): - """Initializer. - - :param controller: object that implement methods created by routes lib - """ - self.controller = controller - self.deserializer = serializers.JSONRequestDeserializer() - self.serializer = serializers.JSONResponseSerializer() - - @webob_dec.wsgify(RequestClass=Request) - def __call__(self, request): - """WSGI method that controls (de)serialization and method dispatch.""" - action_args = self.get_action_args(request.environ) - action = action_args.pop('action', None) - status_code = action_args.pop('success', None) - - try: - deserialized_request = self.dispatch(self.deserializer, - action, request) - action_args.update(deserialized_request) - - LOG.debug('Calling %(controller)s : %(action)s', - {'controller': self.controller, 'action': action}) - - action_result = self.dispatch(self.controller, action, - request, **action_args) - except TypeError as err: - LOG.error('Exception handling resource: %s', err) - msg = _('The server could not comply with the request since ' - 'it is either malformed or otherwise incorrect.') - err = exc.HTTPBadRequest(msg) - http_exc = translate_exception(err, request.best_match_language()) - # NOTE(luisg): We disguise HTTP exceptions, otherwise they will be - # treated by wsgi as responses ready to be sent back and they - # won't make it into the pipeline app that serializes errors - raise exception.HTTPExceptionDisguise(http_exc) - except exc.HTTPException as err: - if not isinstance(err, exc.HTTPError): - # Some HTTPException are actually not errors, they are - # responses ready to be sent back to the users, so we don't - # create error log, but disguise and translate them to meet - # openstacksdk's need. 
- http_exc = translate_exception(err, - request.best_match_language()) - raise http_exc - if isinstance(err, exc.HTTPServerError): - LOG.error( - "Returning %(code)s to user: %(explanation)s", - {'code': err.code, 'explanation': err.explanation}) - http_exc = translate_exception(err, request.best_match_language()) - raise exception.HTTPExceptionDisguise(http_exc) - except exception.SenlinException as err: - raise translate_exception(err, request.best_match_language()) - except Exception as err: - log_exception(err) - raise translate_exception(err, request.best_match_language()) - - try: - response = webob.Response(request=request) - # Customize status code if default (200) should be overridden - if status_code is not None: - response.status_code = int(status_code) - # Customize 'location' header if provided - if action_result and isinstance(action_result, dict): - location = action_result.pop('location', None) - if location: - response.location = '/v1%s' % location - if not action_result: - action_result = None - - # Attach openstack-api-version header - if hasattr(response, 'headers'): - for hdr, val in response.headers.items(): - # Note(lvdongbing): Ensure header is a python 2 or 3 - # native string (thus not unicode in python 2 but stay - # a string in python 3). Because mod-wsgi checks that - # response header values are what's described as - # "native strings". This means whatever `str` is in - # either python 2 or 3, but never `unicode`. - response.headers[hdr] = str(val) - ver = request.version_request - if not ver.is_null(): - ver_res = ' '.join(['clustering', str(ver)]) - response.headers[API_VERSION_KEY] = ver_res - response.headers['Vary'] = API_VERSION_KEY - - self.dispatch(self.serializer, action, response, action_result) - return response - - # return unserializable result (typically an exception) - except Exception: - return action_result - - def dispatch(self, obj, action, *args, **kwargs): - """Find action-specific method on self and call it.""" - try: - method = getattr(obj, action) - except AttributeError: - method = getattr(obj, 'default') - - try: - return method(*args, **kwargs) - except exception.MethodVersionNotFound: - raise - - def get_action_args(self, request_environment): - """Parse dictionary created by routes library.""" - try: - args = request_environment['wsgiorg.routing_args'][1].copy() - except Exception: - return {} - - try: - del args['controller'] - except KeyError: - pass - - try: - del args['format'] - except KeyError: - pass - - return args - - -class ControllerMetaclass(type): - - def __new__(mcs, name, bases, cls_dict): - versioned_methods = None - for base in bases: - if base.__name__ == "Controller": - if VER_METHOD_ATTR in base.__dict__: - versioned_methods = getattr(base, VER_METHOD_ATTR) - delattr(base, VER_METHOD_ATTR) - if versioned_methods: - cls_dict[VER_METHOD_ATTR] = versioned_methods - - return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, - cls_dict) - - -class Controller(object, metaclass=ControllerMetaclass): - """Generic WSGI controller for resources.""" - - def __init__(self, options): - self.options = options - self.rpc_client = rpc_client.get_engine_client() - - def __getattribute__(self, key): - - def version_select(*args, **kwargs): - """Look for the method and invoke the versioned one. - - This method looks for the method that matches the name provided - and version constraints then calls it with the supplied arguments. - - :returns: The result of the method called. 
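get_action_args() above strips routing bookkeeping out of the match dict produced by the routes library before the remainder is passed to the controller as keyword arguments. A self-contained sketch of that cleanup, restated slightly with dict.pop:

    def get_action_args(environ):
        # The routes middleware stores a (matcher, match_dict) pair under
        # 'wsgiorg.routing_args'; index 1 is the match dict.
        try:
            args = environ['wsgiorg.routing_args'][1].copy()
        except (KeyError, IndexError):
            return {}
        args.pop('controller', None)  # routing-only entries
        args.pop('format', None)
        return args

    environ = {'wsgiorg.routing_args': (None, {'controller': object(),
                                               'action': 'show',
                                               'cluster_id': 'c1'})}
    print(get_action_args(environ))  # {'action': 'show', 'cluster_id': 'c1'}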
- :raises: MethodVersionNotFound if there is no method matching the - name and the version constraints. - """ - - # The first argument is always the request object. The version - # request is attached to the request object. - req = kwargs['req'] if len(args) == 0 else args[0] - ver = req.version_request - func_list = self.versioned_methods[key] - for func in func_list: - if ver.matches(func.min_version, func.max_version): - # update version_select wrapper so other decorator - # attributes are still respected - functools.update_wrapper(version_select, func.func) - return func.func(self, *args, **kwargs) - - # no version match - raise exception.MethodVersionNotFound(version=ver) - - try: - version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR) - except AttributeError: - # no versioning on this class - return object.__getattribute__(self, key) - - if version_meth_dict: - if key in object.__getattribute__(self, VER_METHOD_ATTR): - return version_select - - return object.__getattribute__(self, key) - - # This decorator must appear first (the outermost decorator) on an API - # method for it to work correctly - @classmethod - def api_version(cls, min_ver, max_ver=None): - """Decorator for versioning api methods. - - Add the decorator to any method that takes a request object as the - first parameter and belongs to a class which inherits from - wsgi.Controller. - :param min_ver: String representing the minimum version. - :param max_ver: Optional string representing the maximum version. - """ - - def decorator(f): - obj_min_ver = version_request.APIVersionRequest(min_ver) - obj_max_ver = version_request.APIVersionRequest(max_ver) - - func_name = f.__name__ - new_func = versioned_method.VersionedMethod( - func_name, obj_min_ver, obj_max_ver, f) - - func_dict = getattr(cls, VER_METHOD_ATTR, {}) - if not func_dict: - setattr(cls, VER_METHOD_ATTR, func_dict) - - func_list = func_dict.get(func_name, []) - if not func_list: - func_dict[func_name] = func_list - func_list.append(new_func) - - # Ensure the list is sorted by minimum version (reversed) so when - # we walk through the list later in order we find the method with - # the latest version which supports the version requested - func_list.sort(key=lambda f: f.min_version, reverse=True) - - return f - - return decorator - - def default(self, req, **args): - raise exc.HTTPNotFound() - - -def log_exception(err): - LOG.error("Unexpected error occurred serving API: %s", err) - - -def translate_exception(ex, locale): - """Translate all translatable elements of the given exception.""" - if isinstance(ex, exception.SenlinException): - ex.message = oslo_i18n.translate(ex.message, locale) - else: - ex.message = oslo_i18n.translate(str(ex), locale) - - if isinstance(ex, exc.HTTPError): - ex.explanation = oslo_i18n.translate(ex.explanation, locale) - ex.detail = oslo_i18n.translate(getattr(ex, 'detail', ''), locale) - return ex - - -class BasePasteFactory(object, metaclass=abc.ABCMeta): - """A base class for paste app and filter factories. - - Sub-classes must override the KEY class attribute and provide - a __call__ method. - """ - - KEY = None - - def __init__(self, conf): - self.conf = conf - - @abc.abstractmethod - def __call__(self, global_conf, **local_conf): - return - - def _import_factory(self, local_conf): - """Import an app/filter class. - - Lookup the KEY from the PasteDeploy local conf and import the - class named there. This class can then be used as an app or - filter factory. 
- """ - class_name = local_conf[self.KEY].replace(':', '.').strip() - return importutils.import_class(class_name) - - -class AppFactory(BasePasteFactory): - """A Generic paste.deploy app factory. - - The WSGI app constructor must accept a ConfigOpts object and a local - config dict as its arguments. - """ - - KEY = 'senlin.app_factory' - - def __call__(self, global_conf, **local_conf): - - factory = self._import_factory(local_conf) - return factory(self.conf, **local_conf) - - -class FilterFactory(AppFactory): - """A Generic paste.deploy filter factory. - - This requires senlin.filter_factory to be set to a callable which returns - a WSGI filter when invoked. The WSGI filter constructor must accept a - WSGI app, a ConfigOpts object and a local config dict as its arguments. - """ - - KEY = 'senlin.filter_factory' - - def __call__(self, global_conf, **local_conf): - - factory = self._import_factory(local_conf) - - def filter(app): - return factory(app, self.conf, **local_conf) - - return filter - - -def setup_paste_factories(conf): - """Set up the generic paste app and filter factories. - - The app factories are constructed at runtime to allow us to pass a - ConfigOpts object to the WSGI classes. - - :param conf: a ConfigOpts object - """ - global app_factory, filter_factory - - app_factory = AppFactory(conf) - filter_factory = FilterFactory(conf) - - -def teardown_paste_factories(): - """Reverse the effect of setup_paste_factories().""" - global app_factory, filter_factory - - del app_factory - del filter_factory - - -def paste_deploy_app(paste_config_file, app_name, conf): - """Load a WSGI app from a PasteDeploy configuration. - - Use deploy.loadapp() to load the app from the PasteDeploy configuration, - ensuring that the supplied ConfigOpts object is passed to the app and - filter constructors. - - :param paste_config_file: a PasteDeploy config file - :param app_name: the name of the app/pipeline to load from the file - :param conf: a ConfigOpts object to supply to the app and its filters - :returns: the WSGI app - """ - setup_paste_factories(conf) - try: - return deploy.loadapp("config:%s" % paste_config_file, name=app_name) - finally: - teardown_paste_factories() - - -def _get_deployment_config_file(): - """Retrieve item from deployment_config_file. - - The retrieved item is formatted as an absolute pathname. - """ - config_path = cfg.CONF.find_file(cfg.CONF.senlin_api.api_paste_config) - if config_path is None: - return None - - return os.path.abspath(config_path) - - -def load_paste_app(app_name=None): - """Build and return a WSGI app from a paste config file. - - We assume the last config file specified in the supplied ConfigOpts - object is the paste config file. - - :param app_name: name of the application to load - - :raises RuntimeError when config file cannot be located or application - cannot be loaded from config file - """ - if app_name is None: - app_name = cfg.CONF.prog - - conf_file = _get_deployment_config_file() - if conf_file is None: - raise RuntimeError(_("Unable to locate config file")) - - try: - app = paste_deploy_app(conf_file, app_name, cfg.CONF) - - # Log the options used when starting if we're in debug mode... - if cfg.CONF.debug: - cfg.CONF.log_opt_values(logging.getLogger(app_name), - logging.DEBUG) - - return app - except (LookupError, ImportError) as e: - raise RuntimeError(_("Unable to load %(app_name)s from " - "configuration file %(conf_file)s." 
- "\nGot: %(e)r") % {'app_name': app_name, - 'conf_file': conf_file, - 'e': e}) diff --git a/senlin/api/middleware/__init__.py b/senlin/api/middleware/__init__.py deleted file mode 100644 index 1d0cc5073..000000000 --- a/senlin/api/middleware/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.api.middleware import context -from senlin.api.middleware import fault -from senlin.api.middleware import trust -from senlin.api.middleware import version_negotiation as vn -from senlin.api.middleware import webhook - - -def version_filter(app, conf, **local_conf): - return vn.VersionNegotiationFilter(app, conf) - - -def fault_filter(app, conf, **local_conf): - return fault.FaultWrapper(app) - - -def context_filter(app, conf, **local_conf): - return context.ContextMiddleware(app) - - -def trust_filter(app, conf, **local_conf): - return trust.TrustMiddleware(app) - - -def webhook_filter(app, conf, **local_conf): - return webhook.WebhookMiddleware(app) diff --git a/senlin/api/middleware/context.py b/senlin/api/middleware/context.py deleted file mode 100644 index 83c2fd5ee..000000000 --- a/senlin/api/middleware/context.py +++ /dev/null @@ -1,85 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_middleware import request_id as oslo_request_id -from oslo_utils import encodeutils - -from senlin.api.common import wsgi -from senlin.common import context -from senlin.common import exception - - -class ContextMiddleware(wsgi.Middleware): - - def process_request(self, req): - """Build context from authentication info extracted from the request.""" - - headers = req.headers - environ = req.environ - try: - auth_url = headers.get('X-Auth-Url') - if not auth_url: - # Use auth_url defined in senlin.conf - auth_url = cfg.CONF.authentication.auth_url - - auth_token = headers.get('X-Auth-Token') - auth_token_info = environ.get('keystone.token_info') - - project_id = headers.get('X-Project-Id') - project_name = headers.get('X-Project-Name') - project_domain = headers.get('X-Project-Domain-Id') - project_domain_name = headers.get('X-Project-Domain-Name') - - user_id = headers.get('X-User-Id') - user_name = headers.get('X-User-Name') - user_domain = headers.get('X-User-Domain-Id') - user_domain_name = headers.get('X-User-Domain-Name') - - domain_id = headers.get('X-Domain-Id') - domain_name = headers.get('X-Domain-Name') - - region_name = headers.get('X-Region-Name') - - roles = headers.get('X-Roles') - if roles is not None: - roles = roles.split(',') - - env_req_id = environ.get(oslo_request_id.ENV_REQUEST_ID) - if env_req_id is None: - request_id = None - else: - request_id = encodeutils.safe_decode(env_req_id) - - except Exception: - raise exception.NotAuthenticated() - - api_version = str(req.version_request) - req.context = context.RequestContext( - auth_token=auth_token, - user_id=user_id, - project_id=project_id, - domain_id=domain_id, - user_domain=user_domain, - project_domain=project_domain, - request_id=request_id, - auth_url=auth_url, - user_name=user_name, - project_name=project_name, - domain_name=domain_name, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - auth_token_info=auth_token_info, - region_name=region_name, - roles=roles, - api_version=api_version - ) diff --git a/senlin/api/middleware/fault.py b/senlin/api/middleware/fault.py deleted file mode 100644 index 708d25e8f..000000000 --- a/senlin/api/middleware/fault.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A middleware that turns exceptions into parsable strings.
-""" - -from oslo_utils import reflection -import webob - -from senlin.api.common import serializers -from senlin.api.common import wsgi -from senlin.common import exception - - -class Fault(object): - - def __init__(self, error): - self.error = error - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - serializer = serializers.JSONResponseSerializer() - resp = webob.Response(request=req) - default_webob_exc = webob.exc.HTTPInternalServerError() - resp.status_code = self.error.get('code', default_webob_exc.code) - serializer.default(resp, self.error) - return resp - - -class FaultWrapper(wsgi.Middleware): - """Replace error body with something the client can parse.""" - - error_map = { - 'ActionConflict': webob.exc.HTTPConflict, - 'ActionCooldown': webob.exc.HTTPConflict, - 'ActionInProgress': webob.exc.HTTPConflict, - 'ActionImmutable': webob.exc.HTTPConflict, - 'BadRequest': webob.exc.HTTPBadRequest, - 'FeatureNotSupported': webob.exc.HTTPConflict, - 'Forbidden': webob.exc.HTTPForbidden, - 'InternalError': webob.exc.HTTPInternalServerError, - 'InvalidGlobalAPIVersion': webob.exc.HTTPNotAcceptable, - 'InvalidSpec': webob.exc.HTTPBadRequest, - 'MethodVersionNotFound': webob.exc.HTTPBadRequest, - 'MultipleChoices': webob.exc.HTTPBadRequest, - 'NodeNotOrphan': webob.exc.HTTPConflict, - 'PolicyBindingNotFound': webob.exc.HTTPNotFound, - 'ProfileOperationFailed': webob.exc.HTTPInternalServerError, - 'RequestLimitExceeded': webob.exc.HTTPBadRequest, - 'ResourceInUse': webob.exc.HTTPConflict, - 'ResourceIsLocked': webob.exc.HTTPConflict, - 'ResourceNotFound': webob.exc.HTTPNotFound, - } - - def _map_exception_to_error(self, class_exception): - if class_exception == Exception: - return webob.exc.HTTPInternalServerError - - if class_exception.__name__ not in self.error_map: - return self._map_exception_to_error(class_exception.__base__) - - return self.error_map[class_exception.__name__] - - def _error(self, ex): - traceback_marker = 'Traceback (most recent call last)' - webob_exc = None - if isinstance(ex, exception.HTTPExceptionDisguise): - ex = ex.exc - webob_exc = ex - - ex_type = reflection.get_class_name(ex, fully_qualified=False) - - is_remote = ex_type.endswith('_Remote') - if is_remote: - ex_type = ex_type[:-len('_Remote')] - - full_message = str(ex) - if '\n' in full_message and is_remote: - message = full_message.split('\n', 1)[0] - elif traceback_marker in full_message: - message = full_message.split(traceback_marker, 1)[0] - message = message.rstrip('\n') - else: - message = full_message - - if isinstance(ex, exception.SenlinException): - message = ex.message - - if not webob_exc: - webob_exc = self._map_exception_to_error(ex.__class__) - - error = { - 'code': webob_exc.code, - 'title': webob_exc.title, - 'explanation': webob_exc.explanation, - 'error': { - 'code': webob_exc.code, - 'message': message, - 'type': ex_type, - } - } - - return error - - def process_request(self, req): - try: - return req.get_response(self.application) - except Exception as exc: - return req.get_response(Fault(self._error(exc))) diff --git a/senlin/api/middleware/trust.py b/senlin/api/middleware/trust.py deleted file mode 100644 index 81453ef29..000000000 --- a/senlin/api/middleware/trust.py +++ /dev/null @@ -1,75 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import context -from senlin.common import exception -from senlin.drivers import base as driver_base -from senlin.rpc import client as rpc - - -class TrustMiddleware(wsgi.Middleware): - """Extract trust info from request. - - The extracted information is filled into the request context. - The Senlin engine will use this information for access control. - """ - def _get_trust(self, req): - """List trusts with the current user as the trustor. - - :param req: The WSGI request object. - :return: ID of the trust. An InternalError exception may be raised - on failure. - """ - rpcc = rpc.get_engine_client() - - ctx = req.context - params = {'user': ctx.user_id, 'project': ctx.project_id} - obj = util.parse_request('CredentialGetRequest', req, params) - res = rpcc.call(ctx, 'credential_get', obj) - if res: - trust_id = res.get('trust', None) - if trust_id: - return trust_id - - params = { - 'auth_url': ctx.auth_url, - 'token': ctx.auth_token, - 'user_id': ctx.user_id, - } - kc = driver_base.SenlinDriver().identity(params) - service_cred = context.get_service_credentials() - admin_id = kc.get_user_id(**service_cred) - try: - trust = kc.trust_get_by_trustor(ctx.user_id, admin_id, - ctx.project_id) - except exception.InternalError as ex: - if ex.code == 400: - trust = None - else: - raise - if not trust: - # Create a trust if no existing one is found - trust = kc.trust_create(ctx.user_id, admin_id, ctx.project_id, - ctx.roles) - - # If the credential does not exist, create it; otherwise update it. - cred = {'openstack': {'trust': trust.id}} - params = {'cred': cred} - obj = util.parse_request('CredentialCreateRequest', req, params) - rpcc.call(ctx, 'credential_create', obj) - - return trust.id - - def process_request(self, req): - trust_id = self._get_trust(req) - req.context.trusts = trust_id diff --git a/senlin/api/middleware/version_negotiation.py b/senlin/api/middleware/version_negotiation.py deleted file mode 100644 index 8230c34bc..000000000 --- a/senlin/api/middleware/version_negotiation.py +++ /dev/null @@ -1,148 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
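For quick reference, these are the keystone-style headers that the ContextMiddleware above consumes when building the RequestContext. All values below are placeholders rather than output from a real deployment.

    # Placeholder values only: the auth headers ContextMiddleware reads after
    # keystonemiddleware has validated the token.
    sample_headers = {
        'X-Auth-Url': 'http://keystone.example.com/v3',  # falls back to senlin.conf
        'X-Auth-Token': '<token>',
        'X-Project-Id': '<project-uuid>',
        'X-Project-Name': 'demo',
        'X-Project-Domain-Id': 'default',
        'X-Project-Domain-Name': 'Default',
        'X-User-Id': '<user-uuid>',
        'X-User-Name': 'demo',
        'X-User-Domain-Id': 'default',
        'X-User-Domain-Name': 'Default',
        'X-Region-Name': 'RegionOne',
        'X-Roles': 'member,reader',  # split on ',' into the context roles list
    }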
- -""" -A filter middleware that inspects the requested URI for a version string -and/or Accept headers and attempts to negotiate an API controller to -return -""" - -import re - -import microversion_parse as mp -from oslo_log import log as logging -import webob - -from senlin.api.common import version_request as vr -from senlin.api.common import wsgi -from senlin.api.openstack import versions as os_ver -from senlin.common import exception - -LOG = logging.getLogger(__name__) - - -class VersionNegotiationFilter(wsgi.Middleware): - - def __init__(self, app, conf): - self.versions_app = os_ver.Controller(conf) - self.version_uri_regex = re.compile(r"^v([1-9]\d*)\.?([1-9]\d*|0)?$") - self.conf = conf - super(VersionNegotiationFilter, self).__init__(app) - - def process_request(self, req): - """Process WSGI requests. - - If there is a version identifier in the URI, simply return the correct - API controller, otherwise, if we find an Accept: header, process it - """ - LOG.debug( - "Processing request: %(method)s %(path)s Accept: %(accept)s", - {'method': req.method, 'path': req.path, 'accept': req.accept} - ) - - # If the request is for /versions, just return the versions container - path_info_peak = req.path_info_peek() - if path_info_peak in ('versions', ''): - return self.versions_app - - accept = str(req.accept) - - # Check if there is a requested (micro-)version for API - controller = self._get_controller(req.path_info_peek() or '', req) - if controller: - self._check_version_request(req, controller) - major = req.environ['api.major'] - minor = req.environ['api.minor'] - LOG.debug("Matched versioned URI. Version: %(major)d.%(minor)d", - {'major': major, 'minor': minor}) - # Strip the version from the path - req.path_info_pop() - path = req.path_info_peek() - if path is None or path == '/': - return controller(self.conf) - return None - elif accept.startswith('application/vnd.openstack.clustering-'): - token_loc = len('application/vnd.openstack.clustering-') - accept_version = accept[token_loc:] - controller = self._get_controller(accept_version, req) - if controller: - self._check_version_request(req, controller) - major = req.environ['api.major'] - minor = req.environ['api.minor'] - LOG.debug("Matched versioned media type. Version: " - "%(major)d.%(minor)d", - {'major': major, 'minor': minor}) - path = req.path_info_peek() - if path is None or path == '/': - return controller(self.conf) - return None - else: - LOG.debug("Unknown version in request") - - if accept not in ('*/*', '') and path_info_peak is not None: - LOG.debug("Returning HTTP 404 due to unknown Accept header: %s ", - accept) - return webob.exc.HTTPNotFound() - - return self.versions_app - - def _get_controller(self, subject, req): - """Get a version specific controller based on endpoint version. - - Given a subject string, tries to match a major and/or minor version - number. If found, sets the api.major and api.minor environ variables. - - :param subject: The string to check - :param req: Webob.Request object - :returns: A version controller instance or None. - """ - match = self.version_uri_regex.match(subject) - if not match: - return None - - major, minor = match.groups(0) - major = int(major) - minor = int(minor) - req.environ['api.major'] = major - req.environ['api.minor'] = minor - version = '%s.%s' % (major, minor) - return self.versions_app.get_controller(version) - - def _check_version_request(self, req, controller): - """Set API version request based on the request header and controller. 
- - :param req: The webob.Request object. - :param controller: The API version controller. - :returns: ``None`` - :raises: ``HTTPBadRequest`` if API version string is bad. - """ - api_version = mp.get_version(req.headers, 'clustering') - if api_version is None: - api_version = controller.DEFAULT_API_VERSION - elif api_version.lower() == 'latest': - req.version_request = controller.max_api_version() - return - - try: - ver = vr.APIVersionRequest(api_version) - except exception.InvalidAPIVersionString as e: - raise webob.exc.HTTPBadRequest(str(e)) - - if not ver.matches(controller.min_api_version(), - controller.max_api_version()): - raise exception.InvalidGlobalAPIVersion( - req_ver=api_version, - min_ver=str(controller.min_api_version()), - max_ver=str(controller.max_api_version())) - - req.version_request = ver diff --git a/senlin/api/middleware/webhook.py b/senlin/api/middleware/webhook.py deleted file mode 100644 index 698e5e70c..000000000 --- a/senlin/api/middleware/webhook.py +++ /dev/null @@ -1,114 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from urllib import parse as urlparse -import webob - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.drivers import base as driver_base -from senlin.rpc import client as rpc - -LOG = logging.getLogger(__name__) - - -class WebhookMiddleware(wsgi.Middleware): - """Middleware for authenticating webhook triggering requests. - - This middleware authenticates the webhook trigger requests and then - rebuilds the request header so that the request will successfully pass - the verification of keystone auth_token middleware. - """ - def process_request(self, req): - # We only handle POST requests - if req.method != 'POST': - return - - # Extract webhook (receiver) ID and params - results = self._parse_url(req.url) - if not results: - return - - (receiver_id, params) = results - - api_version = str(req.version_request) - ctx = context.RequestContext(is_admin=True, api_version=api_version) - req.context = ctx - - obj = util.parse_request( - 'ReceiverGetRequest', req, {'identity': receiver_id}) - rpcc = rpc.get_engine_client() - receiver = rpcc.call(ctx, 'receiver_get', obj) - - svc_ctx = context.get_service_credentials() - kwargs = { - 'auth_url': svc_ctx['auth_url'], - 'username': svc_ctx['username'], - 'user_domain_name': svc_ctx['user_domain_name'], - 'password': svc_ctx['password'], - 'project_domain_name': svc_ctx['project_domain_name'], - 'verify': svc_ctx['verify'], - 'interface': svc_ctx['interface'], - } - kwargs.update(receiver['actor']) - - # Get token and fill it into the request header - token = self._get_token(**kwargs) - req.headers['X-Auth-Token'] = token - - def _parse_url(self, url): - """Extract receiver ID from the request URL. 
- - Parse a URL of format: http://host:port/v1/webhooks/id/trigger?V=1&k=v - :param url: The URL from which the request is received. - """ - parts = urlparse.urlparse(url) - p = parts.path.split('/') - - try: - index = p.index('v1') - p = p[(index + 1):] - except ValueError: - pass - - if len(p) != 3 or p[0] != 'webhooks' or p[2] != 'trigger': - return None - - # at this point it has been determined that the URL is a webhook - # trigger request - qs = urlparse.parse_qs(parts.query) - if 'V' in qs: - qs.pop('V') - else: - raise webob.exc.HTTPBadRequest( - explanation=_('V query parameter is required in webhook ' - 'trigger URL')) - - params = dict((k, v[0]) for k, v in qs.items()) - return p[1], params - - def _get_token(self, **kwargs): - """Get a valid token based on the credential provided. - - :param cred: Rebuilt credential dictionary for authentication. - """ - try: - token = driver_base.SenlinDriver().identity.get_token(**kwargs) - except Exception as ex: - LOG.exception('Webhook failed authentication: %s.', ex) - raise exc.Forbidden() - - return token diff --git a/senlin/api/openstack/__init__.py b/senlin/api/openstack/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/api/openstack/history.rst b/senlin/api/openstack/history.rst deleted file mode 100755 index cd200ba1e..000000000 --- a/senlin/api/openstack/history.rst +++ /dev/null @@ -1,132 +0,0 @@ - -API Version History -~~~~~~~~~~~~~~~~~~~ - -This document summarizes the changes made to the REST API with every bump of -API microversion. The description for each version should be verbose so that -it can be used by both users and developers. - - -1.1 ---- - -- This is the initial version of the v1 API which supports microversions. - The v1.1 API is identical to that of v1.0 except for the new supports to - microversion checking. - - A user can specify a header in the API request:: - - OpenStack-API-Version: clustering - - where the ```` is any valid API version supported. If such a - header is not provided, the API behaves as if a version request of v1.0 - is received. - -1.2 ---- - -- Added ``cluster_collect`` API. This API takes a single parameter ``path`` - and interprets it as a JSON path for extracting node properties. Properties - values from all nodes are aggregated into a list and returned to users. - -- Added ``profile_validate`` API. This API is provided to validate the spec - of a profile without really creating a profile object. - -- Added ``policy_validate`` API. This API validates the spec of a policy - without creating a policy object. - -1.3 ---- - -- Added ``cluster_replace_nodes`` API. This API enables users to replace the - specified existing nodes with ones that were not members of any clusters. - -1.4 ---- - -- Added ``profile_type_ops`` API. This API returns a dictionary containing - the operations and parameters supported by a specific profile type. - -- Added ``node_operation`` API. This API enables users to trigger an - operation on a node. The operation and its parameters are determined by the - profile type. - -- Added ``cluster_operation`` API. This API enables users to trigger an - operation on a cluster. The operation and its parameters are determined by - the profile type. - -- Added ``user`` query parameter for listing receivers. - -- Added ``destroy_after_deletion`` parameter for deleting cluster members. - -1.5 ---- - -- Added ``support_status`` to profile type list. - -- Added ``support_status`` to policy type list. 
- -- Added ``support_status`` to profile type show. - -- Added ``support_status`` to policy type show. - -1.6 ---- - -- Added ``profile_only`` parameter to cluster update request. - -- Added ``check`` parameter to node recover request. When this parameter is - specified, the engine will check if the node is active before performing - a recover operation. - -- Added ``check`` parameter to cluster recover request. When this parameter - is specified, the engine will check if the nodes are active before - performing a recover operation. - -1.7 ---- - -- Added ``node_adopt`` operation to node. - -- Added ``node_adopt_preview`` operation to node. - -- Added ``receiver_update`` operation to receiver. - -- Added ``service_list`` API. - -1.8 ---- -- Added ``force`` parameter to cluster delete request. -- Added ``force`` parameter to node delete request. - -1.9 ---- -- Added ``cluster_complete_lifecycle`` API. This API enables users to - trigger the immediate deletion of the nodes identified for deferred - deletion during a scale-in operation. - -1.10 ----- -- Modified the ``webhook_trigger`` API. Inputs for the targeted action - are now sent directly in the request body rather than in the params - field. - -1.11 ----- -- Modified the ``cluster_action`` API. The API now responds with - response code 409 when a scaling action conflicts with one already - being processed or a cooldown for a scaling action is encountered. - -1.12 ----- -- Added ``action_update`` API. This API enables users to update the status of - an action (only CANCELLED is supported). An action that spawns dependent - actions will attempt to cancel all dependent actions. - -1.13 ----- -- Added ``tainted`` to responses returned by node APIs. - -1.14 ----- -- Added ``cluster_id`` to the filters accepted by, and the results returned - by, action APIs. diff --git a/senlin/api/openstack/v1/__init__.py b/senlin/api/openstack/v1/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/api/openstack/v1/actions.py b/senlin/api/openstack/v1/actions.py deleted file mode 100644 index e22593a47..000000000 --- a/senlin/api/openstack/v1/actions.py +++ /dev/null @@ -1,137 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
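The version history above is the user-facing record of the microversioning machinery implemented in wsgi.py and version_negotiation.py earlier in this patch. A minimal client-side sketch of pinning a microversion follows; the endpoint URL and token are placeholders, and the requests library is assumed purely for illustration.

    # Hypothetical client sketch: pin a clustering microversion through the
    # OpenStack-API-Version header described in the 1.1 entry above. The URL
    # and token are placeholders.
    import requests

    resp = requests.get(
        'http://senlin.example.com/cluster/v1/clusters',
        headers={
            'X-Auth-Token': '<token>',
            'OpenStack-API-Version': 'clustering 1.14',
        },
    )
    # wsgi.Resource echoes the negotiated version back, together with a
    # matching Vary header.
    print(resp.headers.get('OpenStack-API-Version'))

Omitting the header makes the API behave as v1.0, and sending "latest" selects the maximum supported version, as handled by _check_version_request above.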
- -from webob import exc - -from senlin.api.common import util -from senlin.api.common import version_request as vr -from senlin.api.common import wsgi -from senlin.common import consts -from senlin.common.i18n import _ - - -class ActionData(object): - """All required data fields for an action.""" - - PARAMS = (consts.ACTION_NAME, consts.ACTION_TARGET, consts.ACTION_ACTION) - - def __init__(self, data): - self.data = data - - def name(self): - if consts.ACTION_NAME not in self.data: - raise exc.HTTPBadRequest(_("No action name specified")) - return self.data[consts.ACTION_NAME] - - def target(self): - if consts.ACTION_TARGET not in self.data: - raise exc.HTTPBadRequest(_("No target specified")) - return self.data[consts.ACTION_TARGET] - - def action(self): - if consts.ACTION_ACTION not in self.data: - raise exc.HTTPBadRequest(_("No action specified")) - return self.data[consts.ACTION_ACTION] - - def params(self): - data = self.data.items() - return dict((k, v) for k, v in data if k not in self.PARAMS) - - -class ActionController(wsgi.Controller): - """WSGI controller for Actions in Senlin v1 API.""" - - # Define request scope - # (must match what is in policy file and policies in code.) - REQUEST_SCOPE = 'actions' - - def _remove_cluster_id(self, req, obj): - if req.version_request > vr.APIVersionRequest("1.13"): - return obj - - if 'cluster_id' in obj: - obj.pop('cluster_id') - - return obj - - @util.policy_enforce - def index(self, req): - whitelist = { - consts.ACTION_NAME: 'mixed', - consts.ACTION_CLUSTER_ID: 'mixed', - consts.ACTION_TARGET: 'mixed', - consts.ACTION_ACTION: 'mixed', - consts.ACTION_STATUS: 'mixed', - consts.PARAM_LIMIT: 'single', - consts.PARAM_MARKER: 'single', - consts.PARAM_SORT: 'single', - consts.PARAM_GLOBAL_PROJECT: 'single', - } - for key in req.params.keys(): - if key not in whitelist.keys(): - raise exc.HTTPBadRequest(_('Invalid parameter %s') % key) - params = util.get_allowed_params(req.params, whitelist) - - project_safe = not util.parse_bool_param( - consts.PARAM_GLOBAL_PROJECT, - params.pop(consts.PARAM_GLOBAL_PROJECT, False)) - params['project_safe'] = project_safe - - obj = util.parse_request('ActionListRequest', req, params) - actions = self.rpc_client.call(req.context, "action_list", obj) - - actions = [self._remove_cluster_id(req, a) for a in actions] - return {'actions': actions} - - @util.policy_enforce - def create(self, req, body): - data = ActionData(body) - result = self.rpc_client.action_create(req.context, - data.name(), - data.target(), - data.action(), - data.params()) - - return self._remove_cluster_id(req, result) - - @util.policy_enforce - def get(self, req, action_id): - params = {'identity': action_id} - obj = util.parse_request('ActionGetRequest', req, params) - action = self.rpc_client.call(req.context, 'action_get', obj) - - action = self._remove_cluster_id(req, action) - return {'action': action} - - @wsgi.Controller.api_version('1.12') - @util.policy_enforce - def update(self, req, action_id, body): - data = body.get('action') - if data is None: - raise exc.HTTPBadRequest(_("Malformed request data, missing " - "'action' key in request body.")) - force_update = req.params.get('force') - - if force_update is not None: - force = util.parse_bool_param(consts.ACTION_UPDATE_FORCE, - force_update) - else: - force = False - - data['force'] = force - data['identity'] = action_id - - obj = util.parse_request('ActionUpdateRequest', req, data) - self.rpc_client.call(req.context, 'action_update', obj) - - raise exc.HTTPAccepted diff --git 
a/senlin/api/openstack/v1/build_info.py b/senlin/api/openstack/v1/build_info.py deleted file mode 100644 index fb6fac784..000000000 --- a/senlin/api/openstack/v1/build_info.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.rpc import client as rpc_client - - -class BuildInfoController(wsgi.Controller): - """WSGI controller for BuildInfo in Senlin v1 API.""" - - # Define request scope - # (must match what is in policy file and policies in code.) - REQUEST_SCOPE = 'build_info' - - def __init__(self, options): - self.options = options - self.rpc_client = rpc_client.get_engine_client() - - @util.policy_enforce - def build_info(self, req): - obj = util.parse_request('GetRevisionRequest', req, {}) - engine_revision = self.rpc_client.call(req.context, 'get_revision', - obj) - build_info = { - 'api': {'revision': cfg.CONF.revision['senlin_api_revision']}, - 'engine': {'revision': engine_revision} - } - - return {'build_info': build_info} diff --git a/senlin/api/openstack/v1/cluster_policies.py b/senlin/api/openstack/v1/cluster_policies.py deleted file mode 100644 index 40e651f3f..000000000 --- a/senlin/api/openstack/v1/cluster_policies.py +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -ClusterPolicies endpoint for Senlin v1 REST API. -""" - -from webob import exc - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import consts -from senlin.common.i18n import _ - - -class ClusterPolicyController(wsgi.Controller): - """WSGI controller for Cluster-Policy binding in Senlin v1 API.""" - - # Define request scope - # (must match what is in policy file and policies in code.) 
- REQUEST_SCOPE = 'cluster_policies' - - @util.policy_enforce - def index(self, req, cluster_id): - param_whitelist = { - consts.CP_ENABLED: 'single', - consts.CP_POLICY_NAME: 'single', - consts.CP_POLICY_TYPE: 'single', - consts.PARAM_SORT: 'single', - } - for key in req.params.keys(): - if (key not in param_whitelist.keys()): - raise exc.HTTPBadRequest(_('Invalid parameter %s') % key) - - params = util.get_allowed_params(req.params, param_whitelist) - key = consts.CP_ENABLED - if key in params: - params[key] = util.parse_bool_param(key, params[key]) - params['identity'] = cluster_id - - obj = util.parse_request('ClusterPolicyListRequest', req, params) - policies = self.rpc_client.call(req.context, 'cluster_policy_list', - obj) - - return {'cluster_policies': policies} - - @util.policy_enforce - def get(self, req, cluster_id, policy_id): - - params = {'identity': cluster_id, 'policy_id': policy_id} - obj = util.parse_request('ClusterPolicyGetRequest', req, params) - cluster_policy = self.rpc_client.call(req.context, - 'cluster_policy_get', obj) - return {'cluster_policy': cluster_policy} diff --git a/senlin/api/openstack/v1/clusters.py b/senlin/api/openstack/v1/clusters.py deleted file mode 100644 index 433c8d429..000000000 --- a/senlin/api/openstack/v1/clusters.py +++ /dev/null @@ -1,332 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cluster endpoint for Senlin v1 REST API. -""" - -from webob import exc - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import consts -from senlin.common.i18n import _ - - -class ClusterController(wsgi.Controller): - """WSGI controller for clusters resource in Senlin v1 API.""" - - # Define request scope - # (must match what is in policy file and policies in code.) - REQUEST_SCOPE = 'clusters' - - SUPPORTED_ACTIONS = ( - ADD_NODES, DEL_NODES, SCALE_OUT, SCALE_IN, RESIZE, - POLICY_ATTACH, POLICY_DETACH, POLICY_UPDATE, - CHECK, RECOVER, REPLACE_NODES, COMPLETE_LIFECYCLE - ) = ( - 'add_nodes', 'del_nodes', 'scale_out', 'scale_in', 'resize', - 'policy_attach', 'policy_detach', 'policy_update', - 'check', 'recover', 'replace_nodes', 'complete_lifecycle' - ) - - @util.policy_enforce - def index(self, req): - whitelist = { - consts.CLUSTER_NAME: 'mixed', - consts.CLUSTER_STATUS: 'mixed', - consts.PARAM_LIMIT: 'single', - consts.PARAM_MARKER: 'single', - consts.PARAM_SORT: 'single', - consts.PARAM_GLOBAL_PROJECT: 'single', - } - for key in req.params.keys(): - if key not in whitelist: - raise exc.HTTPBadRequest(_("Invalid parameter '%s'") % key) - - params = util.get_allowed_params(req.params, whitelist) - # Note: We have to do a boolean parsing here because 1) there is - # a renaming, 2) the boolean is usually presented as a string. 
- is_global = params.pop(consts.PARAM_GLOBAL_PROJECT, False) - unsafe = util.parse_bool_param(consts.PARAM_GLOBAL_PROJECT, is_global) - params['project_safe'] = not unsafe - req_obj = util.parse_request('ClusterListRequest', req, params) - clusters = self.rpc_client.call(req.context, 'cluster_list', req_obj) - return {'clusters': clusters} - - @util.policy_enforce - def create(self, req, body): - """Create a new cluster.""" - obj = util.parse_request('ClusterCreateRequest', req, body, 'cluster') - cluster = self.rpc_client.call(req.context, 'cluster_create', - obj.cluster) - action_id = cluster.pop('action') - result = { - 'cluster': cluster, - 'location': '/actions/%s' % action_id, - } - return result - - @util.policy_enforce - def get(self, req, cluster_id): - """Gets detailed information for a cluster.""" - body = {'identity': cluster_id} - obj = util.parse_request('ClusterGetRequest', req, body) - cluster = self.rpc_client.call(req.context, 'cluster_get', obj) - - return {'cluster': cluster} - - @util.policy_enforce - def update(self, req, cluster_id, body): - """Update an existing cluster with new parameters.""" - data = body.get('cluster') - if data is None: - raise exc.HTTPBadRequest(_("Malformed request data, missing " - "'cluster' key in request body.")) - params = body['cluster'] - params['identity'] = cluster_id - - obj = util.parse_request('ClusterUpdateRequest', req, params) - cluster = self.rpc_client.call(req.context, 'cluster_update', obj) - - action_id = cluster.pop('action') - result = { - 'cluster': cluster, - 'location': '/actions/%s' % action_id, - } - return result - - def _do_add_nodes(self, req, cid, data): - nodes = data.get('nodes', []) - params = {'identity': cid, 'nodes': nodes} - obj = util.parse_request('ClusterAddNodesRequest', req, params) - return self.rpc_client.call(req.context, 'cluster_add_nodes', obj) - - def _do_del_nodes(self, req, cid, data): - nodes = data.get('nodes', []) - destroy = data.get('destroy_after_deletion', False) - params = {'identity': cid, 'nodes': nodes, - 'destroy_after_deletion': destroy} - obj = util.parse_request('ClusterDelNodesRequest', req, params) - return self.rpc_client.call(req.context, 'cluster_del_nodes', obj) - - @wsgi.Controller.api_version('1.3') - def _do_replace_nodes(self, req, cluster_id, data): - nodes = data.get('nodes', {}) - if not nodes or not isinstance(nodes, dict): - msg = _("The data provided is not a map") - raise exc.HTTPBadRequest(msg) - - params = {'identity': cluster_id, 'nodes': nodes} - obj = util.parse_request('ClusterReplaceNodesRequest', req, params) - return self.rpc_client.call(req.context, 'cluster_replace_nodes', - obj) - - def _do_resize(self, req, cluster_id, data): - params = {} - - for key in [consts.ADJUSTMENT_TYPE, consts.ADJUSTMENT_NUMBER, - consts.ADJUSTMENT_MIN_SIZE, consts.ADJUSTMENT_MAX_SIZE]: - if data.get(key, None) is not None: - params[key] = data.get(key) - - adj_type = data.get(consts.ADJUSTMENT_TYPE, None) - min_step = data.get(consts.ADJUSTMENT_MIN_STEP, None) - if ((adj_type == consts.CHANGE_IN_PERCENTAGE) and - min_step is not None): - params[consts.ADJUSTMENT_MIN_STEP] = min_step - - if not params: - msg = _("Not enough parameters to do resize action.") - raise exc.HTTPBadRequest(msg) - - strict = data.get(consts.ADJUSTMENT_STRICT, None) - if strict is not None: - params[consts.ADJUSTMENT_STRICT] = strict - - params['identity'] = cluster_id - obj = util.parse_request('ClusterResizeRequest', req, params) - - if (obj.obj_attr_is_set('adjustment_type') and - not 
obj.obj_attr_is_set('number')): - msg = _("Missing number value for size adjustment.") - raise exc.HTTPBadRequest(msg) - - if (obj.obj_attr_is_set('number') and - not obj.obj_attr_is_set('adjustment_type')): - msg = _("Missing adjustment_type value for size adjustment.") - raise exc.HTTPBadRequest(msg) - - if (obj.obj_attr_is_set('min_size') and - obj.obj_attr_is_set('max_size')): - if obj.max_size > 0 and obj.min_size > obj.max_size: - msg = _("The specified min_size (%(n)s) is greater than the " - "specified max_size (%(m)s)." - ) % {'m': obj.max_size, 'n': obj.min_size} - raise exc.HTTPBadRequest(msg) - - return self.rpc_client.call(req.context, 'cluster_resize', obj) - - def _do_scale_out(self, req, cid, data): - count = data.get('count', None) - params = {'identity': cid} - if count is not None: - params['count'] = count - obj = util.parse_request('ClusterScaleOutRequest', req, params) - return self.rpc_client.call(req.context, 'cluster_scale_out', obj) - - def _do_scale_in(self, req, cid, data): - count = data.get('count', None) - params = {'identity': cid} - - if count is not None: - params['count'] = count - - obj = util.parse_request('ClusterScaleInRequest', req, params) - return self.rpc_client.call(req.context, 'cluster_scale_in', obj) - - def _do_policy_attach(self, req, cid, data): - params = {'identity': cid} - params.update(data) - obj = util.parse_request('ClusterAttachPolicyRequest', req, params) - return self.rpc_client.call(req.context, - 'cluster_policy_attach', obj) - - def _do_policy_detach(self, req, cid, data): - params = {'identity': cid} - params.update(data) - - obj = util.parse_request('ClusterDetachPolicyRequest', req, params) - return self.rpc_client.call(req.context, - 'cluster_policy_detach', obj) - - def _do_policy_update(self, req, cid, data): - params = {'identity': cid} - params.update(data) - - obj = util.parse_request('ClusterUpdatePolicyRequest', req, params) - return self.rpc_client.call(req.context, - 'cluster_policy_update', obj) - - def _do_check(self, req, cid, data): - params = {'identity': cid, 'params': data} - obj = util.parse_request('ClusterCheckRequest', req, params) - return self.rpc_client.call(req.context, 'cluster_check', obj) - - def _do_recover(self, req, cid, data): - params = {'identity': cid, 'params': data} - obj = util.parse_request('ClusterRecoverRequest', req, params) - return self.rpc_client.call(req.context, 'cluster_recover', obj) - - @wsgi.Controller.api_version('1.9') - def _do_complete_lifecycle(self, req, cid, data): - lifecycle_action_token = data.get('lifecycle_action_token', None) - - params = {'identity': cid, - 'lifecycle_action_token': lifecycle_action_token} - obj = util.parse_request('ClusterCompleteLifecycleRequest', req, - params) - return self.rpc_client.call(req.context, 'cluster_complete_lifecycle', - obj) - - @util.policy_enforce - def action(self, req, cluster_id, body=None): - """Perform specified action on a cluster.""" - body = body or {} - if len(body) < 1: - raise exc.HTTPBadRequest(_('No action specified')) - - if len(body) > 1: - raise exc.HTTPBadRequest(_('Multiple actions specified')) - - this_action = list(body.keys())[0] - if this_action not in self.SUPPORTED_ACTIONS: - msg = _("Unrecognized action '%s' specified") % this_action - raise exc.HTTPBadRequest(msg) - - do_func_name = "_do_" + this_action - if not hasattr(self, do_func_name): - raise exc.HTTPBadRequest(_('Unsupported action')) - - do_func = getattr(self, do_func_name) - data = body.get(this_action, {}) - if not isinstance(data, 
dict): - msg = _("The data provided is not a map") - raise exc.HTTPBadRequest(msg) - - res = do_func(req, cluster_id, data) - - location = {'location': '/actions/%s' % res['action']} - res.update(location) - return res - - @wsgi.Controller.api_version('1.2') - @util.policy_enforce - def collect(self, req, cluster_id, path): - """Aggregate attribute values across a cluster.""" - stripped_path = path.strip() - if (stripped_path == '') or (stripped_path == 'None'): - raise exc.HTTPBadRequest(_("Required path attribute is missing.")) - - params = { - 'identity': cluster_id, - 'path': stripped_path, - } - obj = util.parse_request('ClusterCollectRequest', req, params) - return self.rpc_client.call(req.context, 'cluster_collect', obj) - - @wsgi.Controller.api_version('1.4') - @util.policy_enforce - def operation(self, req, cluster_id, body=None): - """Perform specified operation on the specified cluster.""" - body = body or {} - if len(body) < 1: - raise exc.HTTPBadRequest(_('No operation specified')) - - if len(body) > 1: - raise exc.HTTPBadRequest(_('Multiple operations specified')) - - operation = list(body.keys())[0] - params = { - 'identity': cluster_id, - 'operation': operation, - 'params': body[operation].get('params', {}), - 'filters': body[operation].get('filters', {}), - } - obj = util.parse_request('ClusterOperationRequest', req, params) - - res = self.rpc_client.call(req.context, 'cluster_op', obj) - - location = {'location': '/actions/%s' % res['action']} - res.update(location) - return res - - @util.policy_enforce - def delete(self, req, cluster_id, body=None): - if req.params.get('force') is not None: - force = util.parse_bool_param(consts.CLUSTER_DELETE_FORCE, - req.params.get('force')) - elif body: - force = body.get('force') - if force is None: - force = False - else: - force = False - - params = {'identity': cluster_id, 'force': force} - obj = util.parse_request('ClusterDeleteRequest', req, params) - res = self.rpc_client.call(req.context, 'cluster_delete', obj) - - action_id = res.pop('action') - result = {'location': '/actions/%s' % action_id} - return result diff --git a/senlin/api/openstack/v1/events.py b/senlin/api/openstack/v1/events.py deleted file mode 100644 index bb024e980..000000000 --- a/senlin/api/openstack/v1/events.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Event endpoint for Senlin v1 REST API. -""" - -from webob import exc - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import consts -from senlin.common.i18n import _ - - -class EventController(wsgi.Controller): - """WSGI controller for events in Senlin v1 API.""" - - # Define request scope - # (must match what is in policy file and policies in code.) 
- REQUEST_SCOPE = 'events' - - @util.policy_enforce - def index(self, req): - whitelist = { - consts.EVENT_OBJ_NAME: 'mixed', - consts.EVENT_OBJ_TYPE: 'mixed', - consts.EVENT_OBJ_ID: 'mixed', - consts.EVENT_CLUSTER_ID: 'mixed', - consts.EVENT_ACTION: 'mixed', - consts.EVENT_LEVEL: 'mixed', - consts.PARAM_LIMIT: 'single', - consts.PARAM_MARKER: 'single', - consts.PARAM_SORT: 'single', - consts.PARAM_GLOBAL_PROJECT: 'single', - } - - for key in req.params.keys(): - if key not in whitelist.keys(): - raise exc.HTTPBadRequest(_('Invalid parameter %s') % key) - params = util.get_allowed_params(req.params, whitelist) - - project_safe = not util.parse_bool_param( - consts.PARAM_GLOBAL_PROJECT, - params.pop(consts.PARAM_GLOBAL_PROJECT, False)) - params['project_safe'] = project_safe - - obj = util.parse_request('EventListRequest', req, params) - events = self.rpc_client.call(req.context, "event_list", obj) - - return {'events': events} - - @util.policy_enforce - def get(self, req, event_id): - - obj = util.parse_request('EventGetRequest', req, - {'identity': event_id}) - event = self.rpc_client.call(req.context, 'event_get', obj) - - return {'event': event} diff --git a/senlin/api/openstack/v1/nodes.py b/senlin/api/openstack/v1/nodes.py deleted file mode 100644 index 196505f93..000000000 --- a/senlin/api/openstack/v1/nodes.py +++ /dev/null @@ -1,243 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Node endpoint for Senlin v1 REST API. 
-""" -from webob import exc - -from senlin.api.common import util -from senlin.api.common import version_request as vr -from senlin.api.common import wsgi -from senlin.common import consts -from senlin.common.i18n import _ - - -class NodeController(wsgi.Controller): - """WSGI controller for nodes resource in Senlin v1 API.""" - - REQUEST_SCOPE = 'nodes' - - SUPPORTED_ACTIONS = ( - NODE_CHECK, NODE_RECOVER - ) = ( - 'check', 'recover' - ) - - def _remove_tainted(self, req, obj): - if req.version_request > vr.APIVersionRequest("1.12"): - return obj - - if 'tainted' in obj: - obj.pop('tainted') - - return obj - - @util.policy_enforce - def index(self, req): - whitelist = { - consts.NODE_CLUSTER_ID: 'single', - consts.NODE_NAME: 'mixed', - consts.NODE_STATUS: 'mixed', - consts.PARAM_LIMIT: 'single', - consts.PARAM_MARKER: 'single', - consts.PARAM_SORT: 'single', - consts.PARAM_GLOBAL_PROJECT: 'single' - } - for key in req.params.keys(): - if key not in whitelist.keys(): - raise exc.HTTPBadRequest(_('Invalid parameter %s') % key) - params = util.get_allowed_params(req.params, whitelist) - - project_safe = not util.parse_bool_param( - consts.PARAM_GLOBAL_PROJECT, - params.pop(consts.PARAM_GLOBAL_PROJECT, False)) - params['project_safe'] = project_safe - - obj = util.parse_request('NodeListRequest', req, params) - nodes = self.rpc_client.call(req.context, 'node_list', obj) - - nodes = [self._remove_tainted(req, n) for n in nodes] - return {'nodes': nodes} - - @util.policy_enforce - def create(self, req, body): - """Create a new node.""" - obj = util.parse_request('NodeCreateRequest', req, body, 'node') - node = self.rpc_client.call(req.context, 'node_create', - obj.node) - - node = self._remove_tainted(req, node) - - action_id = node.pop('action') - result = { - 'node': node, - 'location': '/actions/%s' % action_id, - } - return result - - @wsgi.Controller.api_version('1.7') - @util.policy_enforce - def adopt(self, req, body): - """Adopt a node for management.""" - obj = util.parse_request('NodeAdoptRequest', req, body) - node = self.rpc_client.call(req.context, 'node_adopt', obj) - - node = self._remove_tainted(req, node) - return {'node': node} - - @wsgi.Controller.api_version('1.7') - @util.policy_enforce - def adopt_preview(self, req, body): - """Preview a node adoption.""" - # make sure we will fall into the preview path - obj = util.parse_request('NodeAdoptPreviewRequest', req, body) - node = self.rpc_client.call(req.context, 'node_adopt_preview', obj) - return {'node_profile': node} - - @util.policy_enforce - def get(self, req, node_id): - params = {'identity': node_id} - key = consts.PARAM_SHOW_DETAILS - if key in req.params: - params['show_details'] = util.parse_bool_param( - key, req.params[key]) - - obj = util.parse_request('NodeGetRequest', req, params) - node = self.rpc_client.call(req.context, 'node_get', obj) - - node = self._remove_tainted(req, node) - return {'node': node} - - @util.policy_enforce - def update(self, req, node_id, body): - data = body.get('node') - if data is None: - raise exc.HTTPBadRequest(_("Malformed request data, missing " - "'node' key in request body.")) - params = data - params['identity'] = node_id - - obj = util.parse_request('NodeUpdateRequest', req, params) - node = self.rpc_client.call(req.context, 'node_update', obj) - - node = self._remove_tainted(req, node) - - action_id = node.pop('action') - result = { - 'node': node, - 'location': '/actions/%s' % action_id, - } - return result - - @util.policy_enforce - def delete(self, req, node_id, body=None): 
-        if body:
-            force = body.get('force')
-        else:
-            force = False
-
-        if force is not None:
-            force = util.parse_bool_param(consts.NODE_DELETE_FORCE, force)
-
-        params = {'identity': node_id, 'force': force}
-
-        obj = util.parse_request('NodeDeleteRequest', req, params)
-        res = self.rpc_client.call(req.context, 'node_delete', obj)
-        action_id = res.pop('action')
-        result = {'location': '/actions/%s' % action_id}
-        return result
-
-    @util.policy_enforce
-    def action(self, req, node_id, body=None):
-        """Perform specified action on a node."""
-
-        body = body or {}
-        if len(body) == 0:
-            raise exc.HTTPBadRequest(_('No action specified.'))
-
-        if len(body) > 1:
-            raise exc.HTTPBadRequest(_('Multiple actions specified.'))
-
-        this_action = list(body.keys())[0]
-        if this_action not in self.SUPPORTED_ACTIONS:
-            msg = _("Unrecognized action '%s' specified.") % this_action
-            raise exc.HTTPBadRequest(msg)
-
-        params = body.get(this_action)
-        if this_action == self.NODE_CHECK:
-            res = self._do_check(req, node_id, params)
-        else:  # self.NODE_RECOVER
-            res = self._do_recover(req, node_id, params)
-
-        location = {'location': '/actions/%s' % res['action']}
-        res.update(location)
-        return res
-
-    def _do_check(self, req, node_id, params):
-        if not isinstance(params, dict):
-            msg = _("The provided params value is not a map.")
-            raise exc.HTTPBadRequest(msg)
-
-        kwargs = {
-            'identity': node_id,
-            'params': params
-        }
-
-        obj = util.parse_request('NodeCheckRequest', req, kwargs)
-        res = self.rpc_client.call(req.context, 'node_check', obj)
-
-        return res
-
-    def _do_recover(self, req, node_id, params):
-        if not isinstance(params, dict):
-            msg = _("The provided params value is not a map.")
-            raise exc.HTTPBadRequest(msg)
-
-        kwargs = {
-            'identity': node_id,
-            'params': params
-        }
-
-        obj = util.parse_request('NodeRecoverRequest', req, kwargs)
-        res = self.rpc_client.call(req.context, 'node_recover', obj)
-
-        return res
-
-    @wsgi.Controller.api_version('1.4')
-    @util.policy_enforce
-    def operation(self, req, node_id, body=None):
-        """Perform the specified operation on the specified node."""
-
-        body = body or {}
-        if len(body) == 0:
-            raise exc.HTTPBadRequest(_('No operation specified.'))
-
-        if len(body) > 1:
-            raise exc.HTTPBadRequest(_('Multiple operations specified.'))
-
-        operation = list(body.keys())[0]
-        params = {
-            'identity': node_id,
-            'operation': operation,
-            'params': body.get(operation),
-        }
-
-        obj = util.parse_request('NodeOperationRequest', req, params)
-        node = self.rpc_client.call(req.context, 'node_op', obj)
-
-        action_id = node.pop('action')
-        result = {
-            'location': '/actions/%s' % action_id,
-            'action': action_id
-        }
-        return result
diff --git a/senlin/api/openstack/v1/policies.py b/senlin/api/openstack/v1/policies.py
deleted file mode 100644
index 423234fc5..000000000
--- a/senlin/api/openstack/v1/policies.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Policy endpoint for Senlin v1 REST API.
-""" - -from webob import exc - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import consts -from senlin.common.i18n import _ -from senlin.objects import base as obj_base - - -class PolicyController(wsgi.Controller): - """WSGI controller for policy resource in Senlin v1 API.""" - - # Define request scope - # (must match what is in policy file and policies in code.) - REQUEST_SCOPE = 'policies' - - @util.policy_enforce - def index(self, req): - whitelist = { - consts.POLICY_NAME: 'mixed', - consts.POLICY_TYPE: 'mixed', - consts.PARAM_LIMIT: 'single', - consts.PARAM_MARKER: 'single', - consts.PARAM_SORT: 'single', - consts.PARAM_GLOBAL_PROJECT: 'single', - } - for key in req.params.keys(): - if key not in whitelist: - raise exc.HTTPBadRequest(_('Invalid parameter %s') % key) - - params = util.get_allowed_params(req.params, whitelist) - is_global = params.pop(consts.PARAM_GLOBAL_PROJECT, False) - - unsafe = util.parse_bool_param(consts.PARAM_GLOBAL_PROJECT, is_global) - params['project_safe'] = not unsafe - obj = util.parse_request('PolicyListRequest', req, params) - policies = self.rpc_client.call(req.context, 'policy_list', obj) - return {'policies': policies} - - @util.policy_enforce - def create(self, req, body): - obj = util.parse_request('PolicyCreateRequest', req, body, 'policy') - result = self.rpc_client.call(req.context, 'policy_create', - obj.policy) - - return {'policy': result} - - @util.policy_enforce - def get(self, req, policy_id): - """Gets detailed information for a policy""" - body = {'identity': policy_id} - obj = util.parse_request('PolicyGetRequest', req, body) - policy = self.rpc_client.call(req.context, 'policy_get', obj) - - return {'policy': policy} - - @util.policy_enforce - def update(self, req, policy_id, body): - data = body.get('policy', None) - if data is None: - raise exc.HTTPBadRequest(_("Malformed request data, missing " - "'policy' key in request body.")) - body_req = obj_base.SenlinObject.normalize_req( - 'PolicyUpdateRequestBody', body['policy']) - obj = util.parse_request('PolicyUpdateRequest', req, - {'identity': policy_id, - 'policy': body_req}) - policy = self.rpc_client.call(req.context, 'policy_update', obj) - - return {'policy': policy} - - @util.policy_enforce - def delete(self, req, policy_id): - body = {'identity': policy_id} - obj = util.parse_request('PolicyDeleteRequest', req, body) - self.rpc_client.call(req.context, 'policy_delete', obj) - raise exc.HTTPNoContent() - - @wsgi.Controller.api_version('1.2') - @util.policy_enforce - def validate(self, req, body): - """Validate the policy spec user specified.""" - obj = util.parse_request('PolicyValidateRequest', req, body, - 'policy') - result = self.rpc_client.call(req.context, 'policy_validate', - obj.policy) - - return {'policy': result} diff --git a/senlin/api/openstack/v1/policy_types.py b/senlin/api/openstack/v1/policy_types.py deleted file mode 100644 index dd238fda7..000000000 --- a/senlin/api/openstack/v1/policy_types.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Policy type endpoint for Senlin v1 REST API.
-"""
-
-from senlin.api.common import util
-from senlin.api.common import version_request as vr
-from senlin.api.common import wsgi
-
-
-class PolicyTypeController(wsgi.Controller):
-    """WSGI controller for policy types resource in Senlin v1 API."""
-
-    # Define request scope
-    # (must match what is in policy file and policies in code.)
-    REQUEST_SCOPE = 'policy_types'
-
-    @util.policy_enforce
-    def index(self, req):
-        """Gets the supported policy types."""
-
-        obj = util.parse_request('PolicyTypeListRequest', req, {})
-        types = self.rpc_client.call(req.context, 'policy_type_list', obj)
-        result = types
-        if req.version_request <= vr.APIVersionRequest("1.4"):
-            # We return only the policy type name before microversion 1.5
-            result = [{'name': '-'.join((t['name'], t['version']))}
-                      for t in types]
-        return {'policy_types': result}
-
-    @util.policy_enforce
-    def get(self, req, type_name):
-        """Gets detailed information for a policy type."""
-
-        obj = util.parse_request(
-            'PolicyTypeGetRequest', req, {'type_name': type_name})
-        content = self.rpc_client.call(req.context, 'policy_type_get', obj)
-        key = 'support_status'
-        if req.version_request <= vr.APIVersionRequest("1.4"):
-            # support_status is only returned from microversion 1.5 on
-            if key in content:
-                content.pop(key)
-        return {'policy_type': content}
diff --git a/senlin/api/openstack/v1/profile_types.py b/senlin/api/openstack/v1/profile_types.py
deleted file mode 100644
index 17dadac9b..000000000
--- a/senlin/api/openstack/v1/profile_types.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Profile type endpoint for Senlin v1 REST API.
-"""
-
-from senlin.api.common import util
-from senlin.api.common import version_request as vr
-from senlin.api.common import wsgi
-
-
-class ProfileTypeController(wsgi.Controller):
-    """WSGI controller for profile types resource in Senlin v1 API."""
-
-    # Define request scope
-    # (must match what is in policy file and policies in code.)
-    REQUEST_SCOPE = 'profile_types'
-
-    @util.policy_enforce
-    def index(self, req):
-
-        obj = util.parse_request('ProfileTypeListRequest', req, {})
-        types = self.rpc_client.call(req.context, 'profile_type_list', obj)
-        result = types
-        if req.version_request <= vr.APIVersionRequest("1.4"):
-            # We return only the profile type name before microversion 1.5
-            result = [{'name': '-'.join((t['name'], t['version']))}
-                      for t in types]
-        return {'profile_types': result}
-
-    @util.policy_enforce
-    def get(self, req, type_name):
-        """Gets the details about a specified profile type."""
-
-        obj = util.parse_request(
-            'ProfileTypeGetRequest', req, {'type_name': type_name})
-        content = self.rpc_client.call(req.context, 'profile_type_get', obj)
-        key = 'support_status'
-        if req.version_request <= vr.APIVersionRequest("1.4"):
-            # support_status is only returned from microversion 1.5 on
-            if key in content:
-                content.pop(key)
-        return {'profile_type': content}
-
-    @wsgi.Controller.api_version('1.4')
-    @util.policy_enforce
-    def ops(self, req, type_name):
-        """Lists the operations supported by the specified profile type."""
-
-        obj = util.parse_request(
-            'ProfileTypeOpListRequest', req, {'type_name': type_name})
-        return self.rpc_client.call(req.context, 'profile_type_ops', obj)
diff --git a/senlin/api/openstack/v1/profiles.py b/senlin/api/openstack/v1/profiles.py
deleted file mode 100644
index dfcec7b50..000000000
--- a/senlin/api/openstack/v1/profiles.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Profile endpoint for Senlin v1 REST API.
-"""
-
-from webob import exc
-
-from senlin.api.common import util
-from senlin.api.common import wsgi
-from senlin.common import consts
-from senlin.common.i18n import _
-from senlin.objects import base as obj_base
-
-
-class ProfileController(wsgi.Controller):
-    """WSGI controller for profiles resource in Senlin v1 API."""
-
-    # Define request scope
-    # (must match what is in policy file and policies in code.)
- REQUEST_SCOPE = 'profiles' - - @util.policy_enforce - def index(self, req): - whitelist = { - consts.PROFILE_NAME: 'mixed', - consts.PROFILE_TYPE: 'mixed', - consts.PARAM_LIMIT: 'single', - consts.PARAM_MARKER: 'single', - consts.PARAM_SORT: 'single', - consts.PARAM_GLOBAL_PROJECT: 'single', - } - for key in req.params.keys(): - if key not in whitelist.keys(): - raise exc.HTTPBadRequest(_('Invalid parameter %s') % key) - - params = util.get_allowed_params(req.params, whitelist) - - project_safe = not util.parse_bool_param( - consts.PARAM_GLOBAL_PROJECT, - params.pop(consts.PARAM_GLOBAL_PROJECT, False)) - params['project_safe'] = project_safe - - obj = util.parse_request('ProfileListRequest', req, params) - profiles = self.rpc_client.call(req.context, 'profile_list', obj) - return {'profiles': profiles} - - @util.policy_enforce - def create(self, req, body): - - obj = util.parse_request('ProfileCreateRequest', req, body, 'profile') - result = self.rpc_client.call(req.context, 'profile_create', obj) - return {'profile': result} - - @wsgi.Controller.api_version('1.2') - @util.policy_enforce - def validate(self, req, body): - - obj = util.parse_request( - 'ProfileValidateRequest', req, body, 'profile') - result = self.rpc_client.call(req.context, 'profile_validate', obj) - return {'profile': result} - - @util.policy_enforce - def get(self, req, profile_id): - params = {'identity': profile_id} - - obj = util.parse_request('ProfileGetRequest', req, params) - profile = self.rpc_client.call(req.context, 'profile_get', obj) - return {'profile': profile} - - @util.policy_enforce - def update(self, req, profile_id, body): - profile_data = body.get('profile', None) - if profile_data is None: - raise exc.HTTPBadRequest(_("Malformed request data, missing " - "'profile' key in request body.")) - - body_req = obj_base.SenlinObject.normalize_req( - 'ProfileUpdateRequestBody', profile_data) - obj = util.parse_request( - 'ProfileUpdateRequest', req, {'identity': profile_id, - 'profile': body_req}) - - profile = self.rpc_client.call(req.context, 'profile_update', obj) - return {'profile': profile} - - @util.policy_enforce - def delete(self, req, profile_id): - - obj = util.parse_request( - 'ProfileDeleteRequest', req, {'identity': profile_id}) - self.rpc_client.call(req.context, 'profile_delete', obj) - raise exc.HTTPNoContent() diff --git a/senlin/api/openstack/v1/receivers.py b/senlin/api/openstack/v1/receivers.py deleted file mode 100644 index e57e62018..000000000 --- a/senlin/api/openstack/v1/receivers.py +++ /dev/null @@ -1,105 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Receiver endpoint for Senlin v1 REST API. 
-""" - -from webob import exc - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import consts -from senlin.common.i18n import _ - - -class ReceiverController(wsgi.Controller): - """WSGI controller for receiver resource in Senlin v1 API.""" - - REQUEST_SCOPE = 'receivers' - - @util.policy_enforce - def index(self, req): - whitelist = { - consts.RECEIVER_NAME: 'mixed', - consts.RECEIVER_TYPE: 'mixed', - consts.RECEIVER_CLUSTER_ID: 'mixed', - consts.RECEIVER_USER_ID: 'mixed', - consts.RECEIVER_ACTION: 'mixed', - consts.PARAM_LIMIT: 'single', - consts.PARAM_MARKER: 'single', - consts.PARAM_SORT: 'single', - consts.PARAM_GLOBAL_PROJECT: 'single', - } - for key in req.params.keys(): - if key not in whitelist.keys(): - raise exc.HTTPBadRequest(_('Invalid parameter %s') % key) - - params = util.get_allowed_params(req.params, whitelist) - - project_safe = not util.parse_bool_param( - consts.PARAM_GLOBAL_PROJECT, - params.pop(consts.PARAM_GLOBAL_PROJECT, False)) - params['project_safe'] = project_safe - - obj = util.parse_request('ReceiverListRequest', req, params) - receivers = self.rpc_client.call(req.context, 'receiver_list', obj) - - return {'receivers': receivers} - - @util.policy_enforce - def create(self, req, body): - - obj = util.parse_request( - 'ReceiverCreateRequest', req, body, 'receiver') - result = self.rpc_client.call(req.context, 'receiver_create', - obj.receiver) - - return {'receiver': result} - - @util.policy_enforce - def get(self, req, receiver_id): - obj = util.parse_request( - 'ReceiverGetRequest', req, {'identity': receiver_id}) - receiver = self.rpc_client.call(req.context, 'receiver_get', obj) - return {'receiver': receiver} - - @util.policy_enforce - def update(self, req, receiver_id, body): - receiver_data = body.get('receiver', None) - if receiver_data is None: - raise exc.HTTPBadRequest(_("Malformed request data, missing " - "'receiver' key in request body.")) - - kwargs = receiver_data - kwargs['identity'] = receiver_id - obj = util.parse_request('ReceiverUpdateRequest', req, - kwargs) - receiver = self.rpc_client.call(req.context, 'receiver_update', obj) - - return {'receiver': receiver} - - @util.policy_enforce - def delete(self, req, receiver_id): - - obj = util.parse_request( - 'ReceiverDeleteRequest', req, {'identity': receiver_id}) - self.rpc_client.call(req.context, 'receiver_delete', obj) - raise exc.HTTPNoContent() - - @util.policy_enforce - def notify(self, req, receiver_id, body=None): - - obj = util.parse_request( - 'ReceiverNotifyRequest', req, {'identity': receiver_id}) - self.rpc_client.call(req.context, 'receiver_notify', obj) - raise exc.HTTPNoContent() diff --git a/senlin/api/openstack/v1/router.py b/senlin/api/openstack/v1/router.py deleted file mode 100644 index 218ce8c5e..000000000 --- a/senlin/api/openstack/v1/router.py +++ /dev/null @@ -1,336 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import routes - -from senlin.api.common import wsgi -from senlin.api.openstack.v1 import actions -from senlin.api.openstack.v1 import build_info -from senlin.api.openstack.v1 import cluster_policies -from senlin.api.openstack.v1 import clusters -from senlin.api.openstack.v1 import events -from senlin.api.openstack.v1 import nodes -from senlin.api.openstack.v1 import policies -from senlin.api.openstack.v1 import policy_types -from senlin.api.openstack.v1 import profile_types -from senlin.api.openstack.v1 import profiles -from senlin.api.openstack.v1 import receivers -from senlin.api.openstack.v1 import services -from senlin.api.openstack.v1 import version -from senlin.api.openstack.v1 import webhooks - - -class API(wsgi.Router): - """WSGI router for Cluster v1 REST API requests.""" - - def __init__(self, conf, **local_conf): - self.conf = conf - mapper = routes.Mapper() - - # version - res = wsgi.Resource(version.VersionController(conf)) - with mapper.submapper(controller=res) as sub_mapper: - - sub_mapper.connect("version", - "/", - action="version", - conditions={'method': 'GET'}) - - # Profile_types - res = wsgi.Resource(profile_types.ProfileTypeController(conf)) - with mapper.submapper(controller=res) as sub_mapper: - - sub_mapper.connect("profile_type_index", - "/profile-types", - action="index", - conditions={'method': 'GET'}) - sub_mapper.connect("profile_type_get", - "/profile-types/{type_name}", - action="get", - conditions={'method': 'GET'}) - sub_mapper.connect("profile_type_ops", - "/profile-types/{type_name}/ops", - action="ops", - conditions={'method': 'GET'}) - - # Profiles - res = wsgi.Resource(profiles.ProfileController(conf)) - with mapper.submapper(controller=res) as sub_mapper: - - sub_mapper.connect("profile_index", - "/profiles", - action="index", - conditions={'method': 'GET'}) - sub_mapper.connect("profile_create", - "/profiles", - action="create", - conditions={'method': 'POST'}, - success=201) - sub_mapper.connect("profile_get", - "/profiles/{profile_id}", - action="get", - conditions={'method': 'GET'}) - sub_mapper.connect("profile_update", - "/profiles/{profile_id}", - action="update", - conditions={'method': 'PATCH'}) - sub_mapper.connect("profile_delete", - "/profiles/{profile_id}", - action="delete", - conditions={'method': 'DELETE'}) - sub_mapper.connect("profile_validate", - "/profiles/validate", - action="validate", - conditions={'method': 'POST'}) - - # Policy Types - res = wsgi.Resource(policy_types.PolicyTypeController(conf)) - with mapper.submapper(controller=res) as sub_mapper: - # Policy collection - sub_mapper.connect("policy_type_index", - "/policy-types", - action="index", - conditions={'method': 'GET'}) - sub_mapper.connect("policy_type_get", - "/policy-types/{type_name}", - action="get", - conditions={'method': 'GET'}) - - # Policies - res = wsgi.Resource(policies.PolicyController(conf)) - with mapper.submapper(controller=res) as sub_mapper: - - sub_mapper.connect("policy_index", - "/policies", - action="index", - conditions={'method': 'GET'}) - sub_mapper.connect("policy_create", - "/policies", - action="create", - conditions={'method': 'POST'}, - success=201) - sub_mapper.connect("policy_get", - "/policies/{policy_id}", - action="get", - conditions={'method': 'GET'}) - sub_mapper.connect("policy_update", - "/policies/{policy_id}", - action="update", - conditions={'method': 'PATCH'}) - sub_mapper.connect("policy_delete", - "/policies/{policy_id}", - action="delete", - conditions={'method': 'DELETE'}) - sub_mapper.connect("policy_validate", - 
"/policies/validate", - action="validate", - conditions={'method': 'POST'}) - - # Clusters - res = wsgi.Resource(clusters.ClusterController(conf)) - with mapper.submapper(controller=res) as sub_mapper: - - sub_mapper.connect("cluster_index", - "/clusters", - action="index", - conditions={'method': 'GET'}) - sub_mapper.connect("cluster_create", - "/clusters", - action="create", - conditions={'method': 'POST'}, - success=202) - sub_mapper.connect("cluster_get", - "/clusters/{cluster_id}", - action="get", - conditions={'method': 'GET'}) - sub_mapper.connect("cluster_update", - "/clusters/{cluster_id}", - action="update", - conditions={'method': 'PATCH'}, - success=202) - sub_mapper.connect("cluster_action", - "/clusters/{cluster_id}/actions", - action="action", - conditions={'method': 'POST'}, - success=202) - sub_mapper.connect("cluster_collect", - "/clusters/{cluster_id}/attrs/{path}", - action="collect", - conditions={'method': 'GET'}) - sub_mapper.connect("cluster_delete", - "/clusters/{cluster_id}", - action="delete", - conditions={'method': 'DELETE'}, - success=202) - sub_mapper.connect("cluster_operation", - "/clusters/{cluster_id}/ops", - action="operation", - conditions={'method': 'POST'}, - success=202) - - # Nodes - res = wsgi.Resource(nodes.NodeController(conf)) - with mapper.submapper(controller=res) as sub_mapper: - - sub_mapper.connect("node_index", - "/nodes", - action="index", - conditions={'method': 'GET'}) - sub_mapper.connect("node_create", - "/nodes", - action="create", - conditions={'method': 'POST'}, - success=202) - sub_mapper.connect("node_adopt", - "/nodes/adopt", - action="adopt", - conditions={'method': 'POST'}) - sub_mapper.connect("node_adopt_preview", - "/nodes/adopt-preview", - action="adopt_preview", - conditions={'method': 'POST'}) - sub_mapper.connect("node_get", - "/nodes/{node_id}", - action="get", - conditions={'method': 'GET'}) - sub_mapper.connect("node_update", - "/nodes/{node_id}", - action="update", - conditions={'method': 'PATCH'}, - success=202) - sub_mapper.connect("node_action", - "/nodes/{node_id}/actions", - action="action", - conditions={'method': 'POST'}, - success=202) - sub_mapper.connect("node_delete", - "/nodes/{node_id}", - action="delete", - conditions={'method': 'DELETE'}, - success=202) - sub_mapper.connect("node_operation", - "/nodes/{node_id}/ops", - action="operation", - conditions={'method': 'POST'}, - success=202) - - # Cluster Policies - res = wsgi.Resource(cluster_policies.ClusterPolicyController(conf)) - policies_path = "/clusters/{cluster_id}" - with mapper.submapper(controller=res, - path_prefix=policies_path) as sub_mapper: - sub_mapper.connect("cluster_policy_list", - "/policies", - action="index", - conditions={'method': 'GET'}) - sub_mapper.connect("cluster_policy_show", - "/policies/{policy_id}", - action="get", - conditions={'method': 'GET'}) - - # Actions - res = wsgi.Resource(actions.ActionController(conf)) - with mapper.submapper(controller=res) as sub_mapper: - - sub_mapper.connect("action_index", - "/actions", - action="index", - conditions={'method': 'GET'}) - sub_mapper.connect("action_create", - "/actions", - action="create", - conditions={'method': 'POST'}, - success=201) - sub_mapper.connect("action_get", - "/actions/{action_id}", - action="get", - conditions={'method': 'GET'}) - sub_mapper.connect("action_update", - "/actions/{action_id}", - action="update", - conditions={'method': 'PATCH'}) - - # Receivers - res = wsgi.Resource(receivers.ReceiverController(conf)) - with mapper.submapper(controller=res) as 
sub_mapper:
-
-            sub_mapper.connect("receivers_index",
-                               "/receivers",
-                               action="index",
-                               conditions={'method': 'GET'})
-            sub_mapper.connect("receiver_create",
-                               "/receivers",
-                               action="create",
-                               conditions={'method': 'POST'},
-                               success=201)
-            sub_mapper.connect("receiver_get",
-                               "/receivers/{receiver_id}",
-                               action="get",
-                               conditions={'method': 'GET'})
-            sub_mapper.connect("receiver_update",
-                               "/receivers/{receiver_id}",
-                               action="update",
-                               conditions={'method': 'PATCH'})
-            sub_mapper.connect("receiver_delete",
-                               "/receivers/{receiver_id}",
-                               action="delete",
-                               conditions={'method': 'DELETE'})
-            sub_mapper.connect("receiver_notify",
-                               "/receivers/{receiver_id}/notify",
-                               action="notify",
-                               conditions={'method': 'POST'})
-
-        # Webhooks
-        res = wsgi.Resource(webhooks.WebhookController(conf))
-        with mapper.submapper(controller=res) as sub_mapper:
-
-            sub_mapper.connect("webhook_trigger",
-                               "/webhooks/{webhook_id}/trigger",
-                               action="trigger",
-                               conditions={'method': 'POST'},
-                               success=202)
-
-        # Events
-        res = wsgi.Resource(events.EventController(conf))
-        with mapper.submapper(controller=res) as sub_mapper:
-
-            sub_mapper.connect("event_index",
-                               "/events",
-                               action="index",
-                               conditions={'method': 'GET'})
-            sub_mapper.connect("event_get",
-                               "/events/{event_id}",
-                               action="get",
-                               conditions={'method': 'GET'})
-
-        # Info
-        res = wsgi.Resource(build_info.BuildInfoController(conf))
-        with mapper.submapper(controller=res) as sub_mapper:
-
-            sub_mapper.connect("build_info",
-                               "/build-info",
-                               action="build_info",
-                               conditions={'method': 'GET'})
-
-        # Services
-        res = wsgi.Resource(services.ServiceController(conf))
-        with mapper.submapper(controller=res) as sub_mapper:
-
-            sub_mapper.connect("service_index",
-                               "/services",
-                               action="index",
-                               conditions={'method': 'GET'})
-
-        super(API, self).__init__(mapper)
diff --git a/senlin/api/openstack/v1/services.py b/senlin/api/openstack/v1/services.py
deleted file mode 100644
index 43c70baf2..000000000
--- a/senlin/api/openstack/v1/services.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -from oslo_config import cfg -from oslo_utils import timeutils -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import exception -from senlin.objects import service as service_obj - -CONF = cfg.CONF - - -class ServiceController(wsgi.Controller): - """WSGI controller for Services in Senlin v1 API.""" - - REQUEST_SCOPE = 'services' - - @util.policy_enforce - def index(self, req): - if not req.context.is_admin: - raise exception.Forbidden() - now = timeutils.utcnow(with_timezone=True) - _services = service_obj.Service.get_all(req.context) - svcs = [] - for svc in _services: - updated_at = svc.updated_at - delta = now - (svc.updated_at or svc.created_at) - delta_sec = delta.total_seconds() - alive = abs(delta_sec) <= CONF.service_down_time - art = (alive and "up") or "down" - active = 'enabled' - if svc.disabled: - active = 'disabled' - if updated_at: - updated_at = timeutils.normalize_time(updated_at) - ret_fields = {'id': svc.id, 'host': svc.host, - 'binary': svc.binary, 'topic': svc.topic, - 'disabled_reason': svc.disabled_reason, - 'status': active, 'state': art, - 'updated_at': updated_at} - svcs.append(ret_fields) - return {'services': svcs} diff --git a/senlin/api/openstack/v1/version.py b/senlin/api/openstack/v1/version.py deleted file mode 100644 index 1bdb2be55..000000000 --- a/senlin/api/openstack/v1/version.py +++ /dev/null @@ -1,88 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import webob.dec - -from senlin.api.common import version_request as vr - - -class VersionController(object): - """WSGI controller for version in Senlin v1 API.""" - - # NOTE: A version change is required when you make any change to the API. - # This includes any semantic changes which may not affect the input or - # output formats or even originate in the API code layer. 
- _MIN_API_VERSION = "1.0" - _MAX_API_VERSION = "1.14" - - DEFAULT_API_VERSION = _MIN_API_VERSION - - def __init__(self, conf): - self.conf = conf - - @webob.dec.wsgify - def __call__(self, req): - info = self.version(req) - body = jsonutils.dumps(info) - response = webob.Response(request=req, content_type='application/json') - response.body = encodeutils.safe_encode(body) - - return response - - @classmethod - def version_info(cls, req): - return { - "id": "1.0", - "status": "CURRENT", - "updated": "2016-01-18T00:00:00Z", - "media-types": [ - { - "base": "application/json", - "type": "application/vnd.openstack.clustering-v1+json" - } - ], - "links": [{ - "href": req.application_url.rstrip('/') + '/v1', - "rel": "self"}, { - "rel": "help", - "href": "https://docs.openstack.org/api-ref/clustering" - }], - "min_version": cls._MIN_API_VERSION, - "max_version": cls._MAX_API_VERSION, - } - - def version(self, req): - return {"version": self.version_info(req)} - - @classmethod - def min_api_version(cls): - return vr.APIVersionRequest(cls._MIN_API_VERSION) - - @classmethod - def max_api_version(cls): - return vr.APIVersionRequest(cls._MAX_API_VERSION) - - @classmethod - def is_supported(cls, req, min_ver=None, max_ver=None): - """Check if API request version satisfies version restrictions. - - :param req: request object - :param min_ver: minimal version of API needed. - :param max_ver: maximum version of API needed. - :returns: True if request satisfies minimal and maximum API version - requirements. False in other case. - """ - min_version = min_ver or cls._MIN_API_VERSION - max_version = max_ver or cls._MAX_API_VERSION - return (vr.APIVersionRequest(max_version) >= req.version_request >= - vr.APIVersionRequest(min_version)) diff --git a/senlin/api/openstack/v1/webhooks.py b/senlin/api/openstack/v1/webhooks.py deleted file mode 100644 index b90420d22..000000000 --- a/senlin/api/openstack/v1/webhooks.py +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Webhook endpoint for Senlin v1 REST API. 
-""" - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.objects import base as obj_base - - -class WebhookController(wsgi.Controller): - """WSGI controller for webhooks resource in Senlin v1 API.""" - - REQUEST_SCOPE = 'webhooks' - - @wsgi.Controller.api_version("1.0", "1.9") - @util.policy_enforce - def trigger(self, req, webhook_id, body=None): - if body is None: - body = {'params': None} - - webhook_version = req.params.getall('V') - if webhook_version == ['1']: - body = obj_base.SenlinObject.normalize_req( - 'WebhookTriggerRequestBody', body) - obj = util.parse_request( - 'WebhookTriggerRequest', req, {'identity': webhook_id, - 'body': body}) - else: - # webhook version 2 and greater accept parameters other than param - obj = util.parse_request( - 'WebhookTriggerRequestParamsInBody', req, - {'identity': webhook_id, 'body': body}) - - res = self.rpc_client.call(req.context, 'webhook_trigger', obj) - location = {'location': '/actions/%s' % res['action']} - res.update(location) - return res - - @wsgi.Controller.api_version("1.10") # noqa - @util.policy_enforce - def trigger(self, req, webhook_id, body=None): # noqa F811 - obj = util.parse_request( - 'WebhookTriggerRequestParamsInBody', req, {'identity': webhook_id, - 'body': body}) - - res = self.rpc_client.call(req.context, 'webhook_trigger', obj) - location = {'location': '/actions/%s' % res['action']} - res.update(location) - return res diff --git a/senlin/api/openstack/versions.py b/senlin/api/openstack/versions.py deleted file mode 100644 index c3fbd7c2f..000000000 --- a/senlin/api/openstack/versions.py +++ /dev/null @@ -1,59 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Controller that returns information on the senlin API versions -""" - -import http.client as http_client -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import webob.dec - -from senlin.api.openstack.v1 import version as v1_controller - - -class Controller(object): - """A controller that produces information on the senlin API versions.""" - - Controllers = { - '1.0': v1_controller.VersionController, - } - - def __init__(self, conf): - self.conf = conf - - @webob.dec.wsgify - def __call__(self, req): - """Respond to a request for all OpenStack API versions.""" - - versions = [] - for ver, vc in self.Controllers.items(): - versions.append(vc.version_info(req)) - - body = jsonutils.dumps(dict(versions=versions)) - - response = webob.Response(request=req, - status=http_client.MULTIPLE_CHOICES, - content_type='application/json') - response.body = encodeutils.safe_encode(body) - - return response - - def get_controller(self, version): - """Return the version specific controller. - - :param version: The version string for mapping. - :returns: A version controller instance or ``None``. 
- """ - return self.Controllers.get(version, None) diff --git a/senlin/cmd/__init__.py b/senlin/cmd/__init__.py deleted file mode 100644 index 3f38f6f53..000000000 --- a/senlin/cmd/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet - -eventlet.monkey_patch(os=False) -# Monkey patch the original current_thread to use the up-to-date _active -# global variable. See https://bugs.launchpad.net/bugs/1863021 and -# https://github.com/eventlet/eventlet/issues/592 -import __original_module_threading as orig_threading # noqa -import threading # noqa -orig_threading.current_thread.__globals__['_active'] = threading._active diff --git a/senlin/cmd/api.py b/senlin/cmd/api.py deleted file mode 100644 index 624d75a36..000000000 --- a/senlin/cmd/api.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Senlin API Server. -""" -import sys - -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_service import systemd - -from senlin.api.common import wsgi -from senlin.common import config -from senlin.common import messaging -from senlin.common import profiler -import senlin.conf -from senlin import objects -from senlin import version - -CONF = senlin.conf.CONF -LOG = logging.getLogger('senlin.api') - - -def main(): - config.parse_args(sys.argv, 'senlin-api') - logging.setup(CONF, 'senlin-api') - gmr.TextGuruMeditation.setup_autorun(version) - objects.register_all() - messaging.setup() - - app = wsgi.load_paste_app() - - host = CONF.senlin_api.bind_host - port = CONF.senlin_api.bind_port - LOG.info('Starting Senlin API on %(host)s:%(port)s', - {'host': host, 'port': port}) - profiler.setup('senlin-api', host) - server = wsgi.Server('senlin-api', CONF.senlin_api) - server.start(app, default_port=port) - systemd.notify_once() - server.wait() diff --git a/senlin/cmd/api_wsgi.py b/senlin/cmd/api_wsgi.py deleted file mode 100644 index bee95d820..000000000 --- a/senlin/cmd/api_wsgi.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""WSGI script for senlin-api.
-
-Use this file for deploying senlin-api under Apache2 (mod_wsgi).
-"""
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from senlin.api.common import wsgi
-from senlin.common import config
-from senlin.common import messaging
-from senlin.common import profiler
-from senlin import objects
-
-
-def init_app():
-    config.parse_args(sys.argv, 'senlin-api')
-    logging.setup(cfg.CONF, 'senlin-api')
-    objects.register_all()
-    messaging.setup()
-
-    profiler.setup('senlin-api', cfg.CONF.host)
-    return wsgi.load_paste_app()
diff --git a/senlin/cmd/conductor.py b/senlin/cmd/conductor.py
deleted file mode 100644
index d8041b884..000000000
--- a/senlin/cmd/conductor.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Senlin Conductor.
-"""
-import sys
-
-from oslo_log import log as logging
-from oslo_reports import guru_meditation_report as gmr
-from oslo_service import service
-
-from senlin.common import config
-from senlin.common import consts
-from senlin.common import messaging
-from senlin.common import profiler
-import senlin.conf
-from senlin import objects
-from senlin import version
-
-CONF = senlin.conf.CONF
-
-
-def main():
-    config.parse_args(sys.argv, 'senlin-conductor')
-    logging.setup(CONF, 'senlin-conductor')
-    logging.set_defaults()
-    gmr.TextGuruMeditation.setup_autorun(version)
-    objects.register_all()
-    messaging.setup()
-
-    from senlin.conductor import service as conductor
-
-    profiler.setup('senlin-conductor', CONF.host)
-    srv = conductor.ConductorService(CONF.host, consts.CONDUCTOR_TOPIC)
-    launcher = service.launch(CONF, srv,
-                              workers=CONF.conductor.workers,
-                              restart_method='mutate')
-    # the following periodic tasks are intended to serve as HA checking
-    # srv.create_periodic_tasks()
-    launcher.wait()
diff --git a/senlin/cmd/engine.py b/senlin/cmd/engine.py
deleted file mode 100644
index 911e7373b..000000000
--- a/senlin/cmd/engine.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -""" -Senlin Engine. -""" -import sys - -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_service import service - -from senlin.common import config -from senlin.common import consts -from senlin.common import messaging -from senlin.common import profiler -import senlin.conf -from senlin import objects -from senlin import version - -CONF = senlin.conf.CONF - - -def main(): - config.parse_args(sys.argv, 'senlin-engine') - logging.setup(CONF, 'senlin-engine') - logging.set_defaults() - gmr.TextGuruMeditation.setup_autorun(version) - objects.register_all() - messaging.setup() - - from senlin.engine import service as engine - - profiler.setup('senlin-engine', CONF.host) - srv = engine.EngineService(CONF.host, - consts.ENGINE_TOPIC) - launcher = service.launch(CONF, srv, - workers=CONF.engine.workers, - restart_method='mutate') - launcher.wait() diff --git a/senlin/cmd/health_manager.py b/senlin/cmd/health_manager.py deleted file mode 100644 index 44abab5d3..000000000 --- a/senlin/cmd/health_manager.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Senlin Health-Manager. -""" -import sys - -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_service import service - -from senlin.common import config -from senlin.common import consts -from senlin.common import messaging -from senlin.common import profiler -import senlin.conf -from senlin import objects -from senlin import version - -CONF = senlin.conf.CONF - - -def main(): - config.parse_args(sys.argv, 'senlin-health-manager') - logging.setup(CONF, 'senlin-health-manager') - logging.set_defaults() - gmr.TextGuruMeditation.setup_autorun(version) - objects.register_all() - messaging.setup() - - from senlin.health_manager import service as health_manager - - profiler.setup('senlin-health-manager', CONF.host) - srv = health_manager.HealthManagerService(CONF.host, - consts.HEALTH_MANAGER_TOPIC) - launcher = service.launch(CONF, srv, - workers=CONF.health_manager.workers, - restart_method='mutate') - launcher.wait() diff --git a/senlin/cmd/manage.py b/senlin/cmd/manage.py deleted file mode 100644 index 345626107..000000000 --- a/senlin/cmd/manage.py +++ /dev/null @@ -1,232 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -CLI interface for senlin management. 
-""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from senlin.common import config -from senlin.common import context -from senlin.common.i18n import _ -from senlin.db import api -from senlin.objects import service as service_obj - -CONF = cfg.CONF - - -def do_db_version(): - """Print database's current migration level.""" - print(api.db_version()) - - -def do_db_sync(): - """Place a database under migration control and upgrade. - - DB is created first if necessary. - """ - api.db_sync() - - -def do_event_purge(): - """Purge the specified event records in senlin's database.""" - if CONF.command.age < 0: - print(_("Age must be a positive integer.")) - return - api.event_purge(api.get_engine(), - CONF.command.project_id, - CONF.command.granularity, - CONF.command.age) - - -def do_action_purge(): - """Purge the specified action records in senlin's database.""" - age = CONF.command.age - - if age < 0: - print(_("Age must be a positive integer.")) - return - - if CONF.command.granularity == 'days': - age = age * 86400 - elif CONF.command.granularity == 'hours': - age = age * 3600 - elif CONF.command.granularity == 'minutes': - age = age * 60 - - if age < CONF.default_action_timeout: - print(_("Age must be greater than the default action timeout.")) - return - api.action_purge(api.get_engine(), - CONF.command.project_id, - CONF.command.granularity, - CONF.command.age) - - -class ServiceManageCommand(object): - def __init__(self): - self.ctx = context.get_admin_context() - - def _format_service(self, service): - if service is None: - return - - status = 'up' - CONF.import_opt('periodic_interval', 'senlin.conf') - max_interval = 2.2 * CONF.periodic_interval - if timeutils.is_older_than(service.updated_at, max_interval): - status = 'down' - - result = { - 'service_id': service.id, - 'binary': service.binary, - 'host': service.host, - 'topic': service.topic, - 'created_at': service.created_at, - 'updated_at': service.updated_at, - 'status': status - } - return result - - def service_list(self): - services = [self._format_service(service) - for service in service_obj.Service.get_all(self.ctx)] - - print_format = "%-36s %-24s %-16s %-16s %-10s %-24s %-24s" - print(print_format % (_('Service ID'), - _('Host'), - _('Binary'), - _('Topic'), - _('Status'), - _('Created At'), - _('Updated At'))) - - for svc in services: - print(print_format % (svc['service_id'], - svc['host'], - svc['binary'], - svc['topic'], - svc['status'], - svc['created_at'], - svc['updated_at'])) - - def service_clean(self): - for service in service_obj.Service.get_all(self.ctx): - svc = self._format_service(service) - if svc['status'] == 'down': - print(_('Dead service %s is removed.') % svc['service_id']) - service_obj.Service.delete(svc['service_id']) - - @staticmethod - def add_service_parsers(subparsers): - service_parser = subparsers.add_parser('service') - service_parser.set_defaults(command_object=ServiceManageCommand) - service_subparsers = service_parser.add_subparsers(dest='action') - list_parser = service_subparsers.add_parser('list') - list_parser.set_defaults(func=ServiceManageCommand().service_list) - remove_parser = service_subparsers.add_parser('clean') - remove_parser.set_defaults(func=ServiceManageCommand().service_clean) - - -def add_command_parsers(subparsers): - parser = subparsers.add_parser('db_version') - parser.set_defaults(func=do_db_version) - - parser = subparsers.add_parser('db_sync') - parser.set_defaults(func=do_db_sync) - 
ServiceManageCommand.add_service_parsers(subparsers)
-    parser.add_argument('version', nargs='?')
-    parser.add_argument('current_version', nargs='?')
-
-    parser = subparsers.add_parser('event_purge')
-    parser.set_defaults(func=do_event_purge)
-    parser.add_argument('-p',
-                        '--project-id',
-                        nargs='?',
-                        metavar='',
-                        help=_("Purge event records with specified project. "
-                               "This can be specified multiple times, or once "
-                               "with parameters separated by semicolon."),
-                        action='append')
-    parser.add_argument('-g',
-                        '--granularity',
-                        default='days',
-                        choices=['days', 'hours', 'minutes', 'seconds'],
-                        help=_("Purge event records which were created in the "
-                               "specified time period. The time is specified "
-                               "by age and granularity, whose value must be "
-                               "one of 'days' (default), 'hours', 'minutes' "
-                               "or 'seconds'."))
-    parser.add_argument('age',
-                        type=int,
-                        nargs='?',
-                        default=30,
-                        help=_("Purge event records which were created in the "
-                               "specified time period. The time is specified "
-                               "by age and granularity. For example, "
-                               "granularity=hours and age=2 means purging "
-                               "events created two hours ago. Defaults to "
-                               "30."))
-
-    parser = subparsers.add_parser('action_purge')
-    parser.set_defaults(func=do_action_purge)
-    parser.add_argument('-p',
-                        '--project-id',
-                        nargs='?',
-                        metavar='',
-                        help=_("Purge action records with specified project. "
-                               "This can be specified multiple times, or once "
-                               "with parameters separated by semicolon."),
-                        action='append')
-    parser.add_argument('-g',
-                        '--granularity',
-                        default='days',
-                        choices=['days', 'hours', 'minutes', 'seconds'],
-                        help=_("Purge action records which were created in "
-                               "the specified time period. The time is "
-                               "specified by age and granularity, whose value "
-                               "must be one of 'days' (default), 'hours', "
-                               "'minutes' or 'seconds'."))
-    parser.add_argument('age',
-                        type=int,
-                        nargs='?',
-                        default=30,
-                        help=_("Purge action records which were created in "
-                               "the specified time period. The time is "
-                               "specified by age and granularity. For "
-                               "example, granularity=hours and age=2 means "
-                               "purging actions created two hours ago. "
-                               "Defaults to 30."))
-
-
-command_opt = cfg.SubCommandOpt('command',
-                                title='Commands',
-                                help=_('Show available commands.'),
-                                handler=add_command_parsers)
-
-
-def main():
-    try:
-        CONF.register_cli_opt(command_opt)
-        default_config_files = cfg.find_config_files('senlin',
-                                                     'senlin-manage')
-        config.parse_args(sys.argv, 'senlin-manage', default_config_files)
-        logging.setup(CONF, 'senlin-manage')
-    except RuntimeError as e:
-        sys.exit("ERROR: %s" % e)
-
-    try:
-        CONF.command.func()
-    except Exception as e:
-        sys.exit("ERROR: %s" % e)
diff --git a/senlin/cmd/status.py b/senlin/cmd/status.py
deleted file mode 100644
index 2d3bde105..000000000
--- a/senlin/cmd/status.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright (c) 2018 NEC, Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_upgradecheck import common_checks
-from oslo_upgradecheck import upgradecheck
-
-from senlin.common.i18n import _
-from senlin.db import api
-from senlin.db.sqlalchemy import api as sql_api
-
-from sqlalchemy import MetaData, Table, select, column
-
-
-class Checks(upgradecheck.UpgradeCommands):
-
-    """Upgrade checks for the senlin-status upgrade check command.
-
-    Upgrade checks should be added as separate methods in this class
-    and added to the _upgrade_checks tuple.
-    """
-
-    def _check_healthpolicy(self):
-        """Check if version 1.0 health policies exist.
-
-        Stein introduces health policy version 1.1 which is incompatible with
-        health policy version 1.0. Users are required to delete version 1.0
-        health policies before upgrade and recreate them in version 1.1 format
-        after upgrading.
-        """
-
-        engine = api.get_engine()
-        metadata = MetaData()
-        metadata.bind = engine
-
-        policy = Table('policy', metadata, autoload_with=engine)
-
-        healthpolicy_select = (
-            select(column('name'))
-            .select_from(policy)
-            .where(column('type') == 'senlin.policy.health-1.0')
-        )
-
-        with sql_api.session_for_read() as session:
-            healthpolicy_rows = session.execute(healthpolicy_select).fetchall()
-
-        if not healthpolicy_rows:
-            return upgradecheck.Result(upgradecheck.Code.SUCCESS)
-
-        healthpolicy_names = [row[0] for row in healthpolicy_rows]
-        error_msg = _('The following version 1.0 health policies must be '
                      'deleted before upgrade: \'{}\'. After upgrading, the '
                      'health policies can be recreated in version 1.1 '
                      'format.').format(', '.join(healthpolicy_names))
-        return upgradecheck.Result(upgradecheck.Code.FAILURE, error_msg)
-
-    # The format of the check functions is to return an
-    # oslo_upgradecheck.upgradecheck.Result
-    # object with the appropriate
-    # oslo_upgradecheck.upgradecheck.Code and details set.
-    # If the check hits warnings or failures then those should be stored
-    # in the returned Result's "details" attribute. The
-    # summary will be rolled up at the end of the check() method.
-    _upgrade_checks = (
-        # In the future there should be some real checks added here
-        (_('HealthPolicy'), _check_healthpolicy),
-        (_('Policy File JSON to YAML Migration'),
-         (common_checks.check_policy_json, {'conf': cfg.CONF})),
-    )
-
-
-def main():
-    return upgradecheck.main(
-        cfg.CONF, project='senlin', upgrade_command=Checks())
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/senlin/common/__init__.py b/senlin/common/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/senlin/common/config.py b/senlin/common/config.py
deleted file mode 100644
index 18ade8700..000000000
--- a/senlin/common/config.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -""" -Routines for configuring Senlin -""" -from oslo_log import log -from oslo_middleware import cors -from oslo_policy import opts -from oslo_utils import importutils - -import senlin.conf -from senlin import version - -profiler = importutils.try_import('osprofiler.opts') - -CONF = senlin.conf.CONF - - -def parse_args(argv, name, default_config_files=None): - log.register_options(CONF) - - if profiler: - profiler.set_defaults(CONF) - - set_config_defaults() - - CONF( - argv[1:], - project='senlin', - prog=name, - version=version.version_info.version_string(), - default_config_files=default_config_files, - ) - - -def set_config_defaults(): - """Update default configuration options for oslo.middleware.""" - cors.set_defaults( - allow_headers=['X-Auth-Token', - 'X-Identity-Status', - 'X-Roles', - 'X-Service-Catalog', - 'X-User-Id', - 'X-Tenant-Id', - 'X-OpenStack-Request-ID'], - expose_headers=['X-Auth-Token', - 'X-Subject-Token', - 'X-Service-Token', - 'X-OpenStack-Request-ID'], - allow_methods=['GET', - 'PUT', - 'POST', - 'DELETE', - 'PATCH']) - - # TODO(gmann): Remove setting the default value of config policy_file - # once oslo_policy change the default value to 'policy.yaml'. - # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49 - opts.set_defaults(CONF, 'policy.yaml') diff --git a/senlin/common/constraints.py b/senlin/common/constraints.py deleted file mode 100644 index 73d2cc9a5..000000000 --- a/senlin/common/constraints.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections - -from senlin.common import exception as exc -from senlin.common.i18n import _ - - -class BaseConstraint(collections.abc.Mapping): - KEYS = ( - TYPE, CONSTRAINT, - ) = ( - 'type', 'constraint', - ) - - def __str__(self): - """Utility method for generating schema docs.""" - return self.desc() - - def validate(self, value, schema=None, context=None): - """Base entry for validation.""" - if not self._validate(value, schema=schema, context=context): - raise ValueError(self._error(value)) - - @classmethod - def _name(cls): - return cls.__name__ - - def __getitem__(self, key): - if key == self.TYPE: - return self._name() - elif key == self.CONSTRAINT: - return self._constraint() - - raise KeyError(key) - - def __iter__(self): - for k in self.KEYS: - try: - self[k] - except KeyError: - pass - else: - yield k - - def __len__(self): - return len(list(iter(self))) - - -class AllowedValues(BaseConstraint): - def __init__(self, allowed_values): - if (not isinstance(allowed_values, collections.abc.Sequence) or - isinstance(allowed_values, str)): - msg = _('AllowedValues must be a list or a string') - raise exc.ESchema(message=msg) - - self.allowed = tuple(allowed_values) - - def desc(self): - values = ', '.join(str(v) for v in self.allowed) - return _('Allowed values: %s') % values - - def _error(self, value): - values = ', '.join(str(v) for v in self.allowed) - return _("'%(value)s' must be one of the allowed values: " - "%(allowed)s") % dict(value=value, allowed=values) - - def _validate(self, value, schema=None, context=None): - if isinstance(value, list): - return all(v in self.allowed for v in value) - - # try implicit type conversion - if schema is not None: - _allowed = tuple(schema.to_schema_type(v) - for v in self.allowed) - return schema.to_schema_type(value) in _allowed - return value in self.allowed - - def _constraint(self): - return list(self.allowed) diff --git a/senlin/common/consts.py b/senlin/common/consts.py deleted file mode 100644 index e31091005..000000000 --- a/senlin/common/consts.py +++ /dev/null @@ -1,362 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
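The AllowedValues constraint removed above doubles as a validator and as a mapping used for schema documentation. A small usage sketch outside senlin's schema machinery (schema=None, so no type coercion is attempted):

    c = AllowedValues(['SOFT', 'HARD'])

    c.validate('SOFT')            # passes silently
    c.validate(['SOFT', 'HARD'])  # lists are checked element-wise

    try:
        c.validate('WARM')
    except ValueError as e:
        print(e)  # 'WARM' must be one of the allowed values: SOFT, HARD

    # Mapping behaviour, used when generating schema docs:
    print(dict(c))  # {'type': 'AllowedValues', 'constraint': ['SOFT', 'HARD']}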
- -from oslo_log import log as logging - -RPC_ATTRS = ( - CONDUCTOR_TOPIC, - ENGINE_TOPIC, - HEALTH_MANAGER_TOPIC, - RPC_API_VERSION_BASE, - RPC_API_VERSION, -) = ( - 'senlin-conductor', - 'senlin-engine', - 'senlin-health-manager', - '1.0', - '1.1', -) - -RPC_PARAMS = ( - PARAM_LIMIT, PARAM_MARKER, PARAM_GLOBAL_PROJECT, - PARAM_SHOW_DETAILS, PARAM_SORT, -) = ( - 'limit', 'marker', 'global_project', - 'show_details', 'sort', -) - -SUPPORT_STATUSES = ( - EXPERIMENTAL, SUPPORTED, DEPRECATED, UNSUPPORTED, -) = ( - 'EXPERIMENTAL', 'SUPPORTED', 'DEPRECATED', 'UNSUPPORTED', -) - -ACTION_CAUSES = ( - CAUSE_RPC, CAUSE_DERIVED, CAUSE_DERIVED_LCH -) = ( - 'RPC Request', - 'Derived Action', - 'Derived Action with Lifecycle Hook' -) - -CLUSTER_ACTION_NAMES = ( - CLUSTER_CREATE, CLUSTER_DELETE, CLUSTER_UPDATE, - CLUSTER_ADD_NODES, CLUSTER_DEL_NODES, CLUSTER_RESIZE, - CLUSTER_CHECK, CLUSTER_RECOVER, CLUSTER_REPLACE_NODES, - CLUSTER_SCALE_OUT, CLUSTER_SCALE_IN, - CLUSTER_ATTACH_POLICY, CLUSTER_DETACH_POLICY, CLUSTER_UPDATE_POLICY, - CLUSTER_OPERATION, -) = ( - 'CLUSTER_CREATE', 'CLUSTER_DELETE', 'CLUSTER_UPDATE', - 'CLUSTER_ADD_NODES', 'CLUSTER_DEL_NODES', 'CLUSTER_RESIZE', - 'CLUSTER_CHECK', 'CLUSTER_RECOVER', 'CLUSTER_REPLACE_NODES', - 'CLUSTER_SCALE_OUT', 'CLUSTER_SCALE_IN', - 'CLUSTER_ATTACH_POLICY', 'CLUSTER_DETACH_POLICY', 'CLUSTER_UPDATE_POLICY', - 'CLUSTER_OPERATION', -) - -CLUSTER_SCALE_ACTIONS = [CLUSTER_SCALE_IN, CLUSTER_SCALE_OUT] - -NODE_ACTION_NAMES = ( - NODE_CREATE, NODE_DELETE, NODE_UPDATE, - NODE_JOIN, NODE_LEAVE, - NODE_CHECK, NODE_RECOVER, NODE_OPERATION, -) = ( - 'NODE_CREATE', 'NODE_DELETE', 'NODE_UPDATE', - 'NODE_JOIN', 'NODE_LEAVE', - 'NODE_CHECK', 'NODE_RECOVER', 'NODE_OPERATION', -) - -ADJUSTMENT_PARAMS = ( - ADJUSTMENT_TYPE, ADJUSTMENT_NUMBER, ADJUSTMENT_MIN_STEP, - ADJUSTMENT_MIN_SIZE, ADJUSTMENT_MAX_SIZE, ADJUSTMENT_STRICT, -) = ( - 'adjustment_type', 'number', 'min_step', - 'min_size', 'max_size', 'strict', -) - -ADJUSTMENT_TYPES = ( - EXACT_CAPACITY, CHANGE_IN_CAPACITY, CHANGE_IN_PERCENTAGE, -) = ( - 'EXACT_CAPACITY', 'CHANGE_IN_CAPACITY', 'CHANGE_IN_PERCENTAGE', -) - -CLUSTER_ATTRS = ( - CLUSTER_NAME, CLUSTER_PROFILE, CLUSTER_DESIRED_CAPACITY, - CLUSTER_MIN_SIZE, CLUSTER_MAX_SIZE, CLUSTER_ID, - CLUSTER_DOMAIN, CLUSTER_PROJECT, CLUSTER_USER, - CLUSTER_INIT_AT, CLUSTER_CREATED_AT, CLUSTER_UPDATED_AT, - CLUSTER_STATUS, CLUSTER_STATUS_REASON, CLUSTER_TIMEOUT, - CLUSTER_METADATA, CLUSTER_CONFIG, -) = ( - 'name', 'profile_id', 'desired_capacity', - 'min_size', 'max_size', 'id', - 'domain', 'project', 'user', - 'init_at', 'created_at', 'updated_at', - 'status', 'status_reason', 'timeout', - 'metadata', 'config', -) - -CLUSTER_PARAMS = ( - CLUSTER_PROFILE_ONLY, CLUSTER_DELETE_FORCE -) = ( - 'profile_only', 'force', -) - -CLUSTER_STATUSES = ( - CS_INIT, CS_ACTIVE, CS_CREATING, CS_UPDATING, CS_RESIZING, CS_DELETING, - CS_CHECKING, CS_RECOVERING, CS_CRITICAL, CS_ERROR, CS_WARNING, - CS_OPERATING, -) = ( - 'INIT', 'ACTIVE', 'CREATING', 'UPDATING', 'RESIZING', 'DELETING', - 'CHECKING', 'RECOVERING', 'CRITICAL', 'ERROR', 'WARNING', - 'OPERATING', -) - -NODE_STATUSES = ( - NS_INIT, NS_ACTIVE, NS_ERROR, NS_WARNING, NS_CREATING, NS_UPDATING, - NS_DELETING, NS_RECOVERING, NS_OPERATING, -) = ( - 'INIT', 'ACTIVE', 'ERROR', 'WARNING', 'CREATING', 'UPDATING', - 'DELETING', 'RECOVERING', 'OPERATING', -) - -CLUSTER_SORT_KEYS = [ - CLUSTER_NAME, CLUSTER_STATUS, - CLUSTER_INIT_AT, CLUSTER_CREATED_AT, CLUSTER_UPDATED_AT, -] - -NODE_ATTRS = ( - NODE_INDEX, NODE_NAME, NODE_PROFILE_ID, NODE_CLUSTER_ID, 
- NODE_INIT_AT, NODE_CREATED_AT, NODE_UPDATED_AT, - NODE_STATUS, NODE_ROLE, NODE_METADATA, NODE_TAINTED, -) = ( - 'index', 'name', 'profile_id', 'cluster_id', - 'init_at', 'created_at', 'updated_at', - 'status', 'role', 'metadata', 'tainted', -) - -NODE_SORT_KEYS = [ - NODE_INDEX, NODE_NAME, NODE_STATUS, - NODE_INIT_AT, NODE_CREATED_AT, NODE_UPDATED_AT, -] - -NODE_PARAMS = ( - NODE_DELETE_FORCE, -) = ( - 'force', -) - -PROFILE_ATTRS = ( - PROFILE_ID, PROFILE_NAME, PROFILE_TYPE, - PROFILE_CREATED_AT, PROFILE_UPDATED_AT, - PROFILE_SPEC, PROFILE_METADATA, PROFILE_CONTEXT, -) = ( - 'id', 'name', 'type', - 'created_at', 'updated_at', - 'spec', 'metadata', 'context', -) - -PROFILE_SORT_KEYS = [ - PROFILE_TYPE, PROFILE_NAME, PROFILE_CREATED_AT, PROFILE_UPDATED_AT, -] - -POLICY_ATTRS = ( - POLICY_ID, POLICY_NAME, POLICY_TYPE, POLICY_SPEC, - POLICY_CREATED_AT, POLICY_UPDATED_AT, -) = ( - 'id', 'name', 'type', 'spec', - 'created_at', 'updated_at', -) - -POLICY_SORT_KEYS = [ - POLICY_TYPE, POLICY_NAME, - POLICY_CREATED_AT, POLICY_UPDATED_AT, -] - -CLUSTER_POLICY_ATTRS = ( - CP_POLICY_ID, CP_ENABLED, CP_PRIORITY, - CP_POLICY_NAME, CP_POLICY_TYPE, -) = ( - 'policy_id', 'enabled', 'priority', - 'policy_name', 'policy_type', -) - -CLUSTER_POLICY_SORT_KEYS = [ - CP_ENABLED, -] - -EVENT_ATTRS = ( - EVENT_TIMESTAMP, EVENT_OBJ_ID, EVENT_OBJ_NAME, EVENT_OBJ_TYPE, - EVENT_USER, EVENT_ACTION, EVENT_STATUS, EVENT_STATUS_REASON, - EVENT_LEVEL, EVENT_CLUSTER_ID, -) = ( - 'timestamp', 'oid', 'oname', 'otype', - 'user', 'action', 'status', 'status_reason', - 'level', 'cluster_id', -) - -EVENT_SORT_KEYS = [ - EVENT_TIMESTAMP, EVENT_LEVEL, EVENT_OBJ_TYPE, EVENT_OBJ_NAME, - EVENT_ACTION, EVENT_STATUS, EVENT_OBJ_ID, EVENT_CLUSTER_ID, -] - -ACTION_ATTRS = ( - ACTION_NAME, ACTION_CLUSTER_ID, ACTION_TARGET, ACTION_ACTION, ACTION_CAUSE, - ACTION_INTERVAL, ACTION_START_TIME, ACTION_END_TIME, - ACTION_TIMEOUT, ACTION_STATUS, ACTION_STATUS_REASON, - ACTION_INPUTS, ACTION_OUTPUTS, ACTION_DEPENDS_ON, ACTION_DEPENDED_BY, - ACTION_CREATED_AT, ACTION_UPDATED_AT, -) = ( - 'name', 'cluster_id', 'target', 'action', 'cause', - 'interval', 'start_time', 'end_time', - 'timeout', 'status', 'status_reason', - 'inputs', 'outputs', 'depends_on', 'depended_by', - 'created_at', 'updated_at', -) - -ACTION_SORT_KEYS = [ - ACTION_NAME, ACTION_TARGET, ACTION_ACTION, ACTION_CREATED_AT, - ACTION_STATUS, -] - -RECEIVER_TYPES = ( - RECEIVER_WEBHOOK, RECEIVER_MESSAGE, -) = ( - 'webhook', 'message', -) - -RECEIVER_ATTRS = ( - RECEIVER_NAME, RECEIVER_TYPE, RECEIVER_CLUSTER, RECEIVER_CLUSTER_ID, - RECEIVER_CREATED_AT, RECEIVER_UPDATED_AT, RECEIVER_USER_ID, - RECEIVER_ACTOR, RECEIVER_ACTION, RECEIVER_PARAMS, RECEIVER_CHANNEL, -) = ( - 'name', 'type', 'cluster', 'cluster_id', - 'created_at', 'updated_at', 'user', - 'actor', 'action', 'params', 'channel', -) - -RECEIVER_SORT_KEYS = [ - RECEIVER_NAME, RECEIVER_TYPE, RECEIVER_ACTION, RECEIVER_CLUSTER_ID, - RECEIVER_CREATED_AT, RECEIVER_USER_ID, -] - -CLUSTER_DEFAULT_VALUE = ( - CLUSTER_DEFAULT_MIN_SIZE, CLUSTER_DEFAULT_MAX_SIZE -) = ( - 0, -1 -) - -# Note: This is a copy of action status definition defined in -# senlin.engine.actions.base module. 
-ACTION_STATUSES = ( - ACTION_INIT, ACTION_WAITING, ACTION_READY, ACTION_RUNNING, - ACTION_SUCCEEDED, ACTION_FAILED, ACTION_CANCELLED, - ACTION_WAITING_LIFECYCLE_COMPLETION, ACTION_SUSPENDED, -) = ( - 'INIT', 'WAITING', 'READY', 'RUNNING', - 'SUCCEEDED', 'FAILED', 'CANCELLED', 'WAITING_LIFECYCLE_COMPLETION', - 'SUSPENDED', -) - -ACTION_PARAMS = ( - ACTION_UPDATE_FORCE, -) = ( - 'force', -) - -EVENT_LEVELS = { - 'CRITICAL': logging.CRITICAL, - 'ERROR': logging.ERROR, - 'WARN': logging.WARNING, - 'INFO': logging.INFO, - 'DEBUG': logging.DEBUG, -} - -DETECTION_TYPES = ( - LIFECYCLE_EVENTS, NODE_STATUS_POLLING, NODE_STATUS_POLL_URL, - HYPERVISOR_STATUS_POLLING, - # LB_STATUS_POLLING, -) = ( - 'LIFECYCLE_EVENTS', 'NODE_STATUS_POLLING', 'NODE_STATUS_POLL_URL', - 'HYPERVISOR_STATUS_POLLING', - # 'LB_STATUS_POLLING', -) - -HEALTH_CHECK_TYPES = ( - EVENTS, POLLING, -) = ( - 'EVENTS', 'POLLING' -) - -RECOVERY_ACTIONS = ( - RECOVER_REBOOT, RECOVER_REBUILD, RECOVER_RECREATE, -) = ( - 'REBOOT', 'REBUILD', 'RECREATE', -) - -RECOVERY_CONDITIONAL = ( - ALL_FAILED, ANY_FAILED, -) = ( - 'ALL_FAILED', 'ANY_FAILED', -) - -NOTIFICATION_PRIORITIES = ( - PRIO_AUDIT, PRIO_CRITICAL, PRIO_ERROR, PRIO_WARN, PRIO_INFO, PRIO_DEBUG, - PRIO_SAMPLE, -) = ( - 'audit', 'critical', 'error', 'warn', 'info', 'debug', 'sample', -) - -NOTIFICATION_PHASES = ( - PHASE_START, PHASE_END, PHASE_ERROR, -) = ( - 'start', 'end', 'error', -) - -LIFECYCLE_TRANSITION_TYPE = ( - LIFECYCLE_NODE_TERMINATION, -) = ( - 'termination', -) - -VM_STATUS = ( - VS_ACTIVE, VS_ERROR, VS_SUSPENDED, VS_SHUTOFF, VS_PAUSED, VS_RESCUE, - VS_DELETED, -) = ( - 'ACTIVE', 'ERROR', 'SUSPENDED', 'SHUTOFF', 'PAUSED', 'RESCUE', 'DELETED', -) - -HEALTH_CHECK_MESSAGE = ( - POLL_STATUS_PASS, POLL_STATUS_FAIL, POLL_URL_PASS, POLL_URL_FAIL, -) = ( - 'Poll Status health check passed', - 'Poll Status health check failed', - 'Poll URL health check passed', - 'Poll URL health check failed', -) - -CONFLICT_BYPASS_ACTIONS = [ - CLUSTER_DELETE, NODE_DELETE, NODE_OPERATION, -] - -LOCK_BYPASS_ACTIONS = [ - CLUSTER_DELETE, NODE_DELETE, NODE_OPERATION, -] - -REBOOT_TYPE = 'type' - -REBOOT_TYPES = ( - REBOOT_SOFT, REBOOT_HARD -) = ( - 'SOFT', 'HARD' -) diff --git a/senlin/common/context.py b/senlin/common/context.py deleted file mode 100644 index ceae73983..000000000 --- a/senlin/common/context.py +++ /dev/null @@ -1,125 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_context import context as base_context -from oslo_utils import encodeutils - -from senlin.common import policy -from senlin.drivers import base as driver_base - - -class RequestContext(base_context.RequestContext): - """Stores information about the security context. - - The context encapsulates information related to the user accessing the - system, as well as additional request information. 
- """ - - def __init__(self, auth_token=None, user_id=None, project_id=None, - domain_id=None, user_domain_id=None, project_domain_id=None, - is_admin=None, read_only=False, show_deleted=False, - request_id=None, auth_url=None, trusts=None, - user_name=None, project_name=None, domain_name=None, - user_domain_name=None, project_domain_name=None, - auth_token_info=None, region_name=None, roles=None, - password=None, api_version=None, **kwargs): - - """Initializer of request context.""" - # We still have 'tenant' param because oslo_context still use it. - super(RequestContext, self).__init__( - auth_token=auth_token, user_id=user_id, project_id=project_id, - domain_id=domain_id, user_domain_id=user_domain_id, - project_domain_id=project_domain_id, - read_only=read_only, show_deleted=show_deleted, - request_id=request_id, - roles=roles) - - # request_id might be a byte array - self.request_id = encodeutils.safe_decode(self.request_id) - - self.auth_url = auth_url - self.trusts = trusts - - self.user_id = user_id - self.user_name = user_name - self.project_id = project_id - self.project_name = project_name - self.domain_id = domain_id - self.domain_name = domain_name - self.user_domain_name = user_domain_name - self.project_domain_name = project_domain_name - - self.auth_token_info = auth_token_info - self.region_name = region_name - self.password = password - self.api_version = api_version - - # Check user is admin or not - if is_admin is None: - self.is_admin = policy.enforce(self, 'context_is_admin', - target={'project': self.project_id}, - do_raise=False) - else: - self.is_admin = is_admin - - def to_dict(self): - # This to_dict() method is not producing 'project_id', 'user_id' or - # 'domain_id' which can be used in from_dict(). This is the reason - # why we are keeping our own copy of user_id, project_id and - # domain_id. - d = super(RequestContext, self).to_dict() - d.update({ - 'auth_url': self.auth_url, - 'auth_token_info': self.auth_token_info, - 'user_id': self.user_id, - 'user_name': self.user_name, - 'user_domain_name': self.user_domain_name, - 'project_id': self.project_id, - 'project_name': self.project_name, - 'project_domain_name': self.project_domain_name, - 'domain_id': self.domain_id, - 'domain_name': self.domain_name, - 'trusts': self.trusts, - 'region_name': self.region_name, - 'password': self.password, - 'api_version': self.api_version, - }) - return d - - @classmethod - def from_dict(cls, values): - return cls(**values) - - -def get_service_credentials(**kwargs): - """An abstraction layer for getting service credential. - - There could be multiple cloud backends for senlin to use. This - abstraction layer provides an indirection for senlin to get the - credentials of 'senlin' user on the specific cloud. By default, - this credential refers to the credentials built for keystone middleware - in an OpenStack cloud. 
- """ - identity_service = driver_base.SenlinDriver().identity - return identity_service.get_service_credentials(**kwargs) - - -def get_service_context(**kwargs): - """Get a customized service context.""" - identity_service = driver_base.SenlinDriver().identity - creds = identity_service.get_service_credentials(**kwargs) - return RequestContext.from_dict(creds) - - -def get_admin_context(): - """Create an administrator context.""" - return RequestContext(is_admin=True) diff --git a/senlin/common/exception.py b/senlin/common/exception.py deleted file mode 100644 index 4b354d645..000000000 --- a/senlin/common/exception.py +++ /dev/null @@ -1,307 +0,0 @@ -# -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Senlin exception subclasses. -""" - -import sys - -from oslo_log import log as logging - -from senlin.common.i18n import _ - -_FATAL_EXCEPTION_FORMAT_ERRORS = False -LOG = logging.getLogger(__name__) - - -class SenlinException(Exception): - """Base Senlin Exception. - - To correctly use this class, inherit from it and define a 'msg_fmt' - property. That msg_fmt will get printed with the keyword arguments - provided to the constructor. - """ - message = _("An unknown exception occurred.") - - def __init__(self, **kwargs): - self.kwargs = kwargs - - try: - self.message = self.msg_fmt % kwargs - # if last char is '.', wipe out redundant '.' - if self.message[-1] == '.': - self.message = self.message.rstrip('.') + '.' - except KeyError: - # exc_info = sys.exc_info() - # if kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception('Exception in string format operation') - for name, value in kwargs.items(): - LOG.error("%s: %s", name, value) # noqa - - if _FATAL_EXCEPTION_FORMAT_ERRORS: - raise - # raise exc_info[0], exc_info[1], exc_info[2] - - def __str__(self): - return str(self.message) - - def __deepcopy__(self, memo): - return self.__class__(**self.kwargs) - - -class SIGHUPInterrupt(SenlinException): - msg_fmt = _("System SIGHUP signal received.") - - -class NotAuthenticated(SenlinException): - msg_fmt = _("You are not authenticated.") - - -class Forbidden(SenlinException): - msg_fmt = _("You are not authorized to complete this operation.") - - -class OverQuota(SenlinException): - msg_fmt = _("Quota exceeded for resources.") - - -class BadRequest(SenlinException): - msg_fmt = _("%(msg)s.") - - -class InvalidAPIVersionString(SenlinException): - msg_fmt = _("API Version String '%(version)s' is of invalid format. It " - "must be of format 'major.minor'.") - - -class MethodVersionNotFound(SenlinException): - msg_fmt = _("API version '%(version)s' is not supported on this method.") - - -class InvalidGlobalAPIVersion(SenlinException): - msg_fmt = _("Version '%(req_ver)s' is not supported by the API. 
Minimum " - "is '%(min_ver)s' and maximum is '%(max_ver)s'.") - - -class MultipleChoices(SenlinException): - msg_fmt = _("Multiple results found matching the query criteria " - "'%(arg)s'. Please be more specific.") - - -class ResourceNotFound(SenlinException): - """Generic exception for resource not found. - - The resource type here can be 'cluster', 'node', 'profile', - 'policy', 'receiver', 'webhook', 'profile_type', 'policy_type', - 'action', 'event' and so on. - """ - msg_fmt = _("The %(type)s '%(id)s' could not be found.") - - @staticmethod - def enhance_msg(enhance, ex): - enhance_msg = ex.message[:4] + enhance + ' ' + ex.message[4:] - return enhance_msg - - -class ResourceInUse(SenlinException): - """Generic exception for resource in use. - - The resource type here can be 'cluster', 'node', 'profile', - 'policy', 'receiver', 'webhook', 'profile_type', 'policy_type', - 'action', 'event' and so on. - """ - msg_fmt = _("The %(type)s '%(id)s' cannot be deleted: %(reason)s.") - - -class ResourceIsLocked(SenlinException): - """Generic exception for resource in use. - - The resource type here can be 'cluster', 'node'. - """ - msg_fmt = _("%(action)s for %(type)s '%(id)s' cannot be completed " - "because it is already locked.") - - -class ProfileNotSpecified(SenlinException): - msg_fmt = _("Profile not specified.") - - -class ProfileOperationFailed(SenlinException): - msg_fmt = _("%(message)s") - - -class ProfileOperationTimeout(SenlinException): - msg_fmt = _("%(message)s") - - -class PolicyNotSpecified(SenlinException): - msg_fmt = _("Policy not specified.") - - -class PolicyBindingNotFound(SenlinException): - msg_fmt = _("The policy '%(policy)s' is not found attached to the " - "specified cluster '%(identity)s'.") - - -class PolicyTypeConflict(SenlinException): - msg_fmt = _("The policy with type '%(policy_type)s' already exists.") - - -class InvalidSpec(SenlinException): - msg_fmt = _("%(message)s") - - -class FeatureNotSupported(SenlinException): - msg_fmt = _("%(feature)s is not supported.") - - -class Error(SenlinException): - msg_fmt = "%(message)s" - - def __init__(self, msg): - super(Error, self).__init__(message=msg) - - -class InvalidContentType(SenlinException): - msg_fmt = _("Invalid content type %(content_type)s") - - -class RequestLimitExceeded(SenlinException): - msg_fmt = _('Request limit exceeded: %(message)s') - - -class ActionInProgress(SenlinException): - msg_fmt = _("The %(type)s '%(id)s' is in status %(status)s.") - - -class ActionConflict(SenlinException): - msg_fmt = _("The %(type)s action for target %(target)s conflicts with " - "the following action(s): %(actions)s") - - -class ActionCooldown(SenlinException): - msg_fmt = _("The %(type)s action for cluster %(cluster)s cannot be " - "processed due to Policy %(policy_id)s cooldown still in " - "progress") - - -class ActionImmutable(SenlinException): - msg_fmt = _("Action (%(id)s) is in status (%(actual)s) while expected " - "status must be one of (%(expected)s).") - - -class NodeNotOrphan(SenlinException): - msg_fmt = _("%(message)s") - - -class InternalError(SenlinException): - """A base class for internal exceptions in senlin. - - The internal exception classes which inherit from :class:`SenlinException` - class should be translated to a user facing exception type if they need to - be made user visible. 
- """ - msg_fmt = _("%(message)s") - message = _('Internal error happened') - - def __init__(self, **kwargs): - self.code = kwargs.pop('code', 500) - # If a "message" is not provided, or None or blank, use the default. - self.message = kwargs.pop('message', self.message) or self.message - super(InternalError, self).__init__( - code=self.code, message=self.message, **kwargs) - - -class EResourceBusy(InternalError): - # Internal exception, not to be exposed to end user. - msg_fmt = _("The %(type)s '%(id)s' is busy now.") - - -class TrustNotFound(InternalError): - # Internal exception, not to be exposed to end user. - msg_fmt = _("The trust for trustor '%(trustor)s' could not be found.") - - -class EResourceCreation(InternalError): - # Used when creating resources in other services - def __init__(self, **kwargs): - self.resource_id = kwargs.pop('resource_id', None) - super(EResourceCreation, self).__init__( - resource_id=self.resource_id, **kwargs) - msg_fmt = _("Failed in creating %(type)s: %(message)s.") - - -class EResourceUpdate(InternalError): - # Used when updating resources from other services - msg_fmt = _("Failed in updating %(type)s '%(id)s': %(message)s.") - - -class EResourceDeletion(InternalError): - # Used when deleting resources from other services - msg_fmt = _("Failed in deleting %(type)s '%(id)s': %(message)s.") - - -class EServerNotFound(InternalError): - # Used when deleting resources from other services - msg_fmt = _("Failed in found %(type)s '%(id)s': %(message)s.") - - -class EResourceOperation(InternalError): - """Generic exception for resource fail operation. - - The op here can be 'recovering','rebuilding', 'checking' and - so on. And the op 'creating', 'updating' and 'deleting' we can - use separately class `EResourceCreation`,`EResourceUpdate` and - `EResourceDeletion`. - The type here is resource's driver type.It can be 'server', - 'stack', 'container' and so on. - The id is resource's id. - The message here can be message from class 'ResourceNotFound', - 'ResourceInUse' and so on, or developer can specified message. - """ - def __init__(self, **kwargs): - self.resource_id = kwargs.pop('resource_id', None) - super(EResourceOperation, self).__init__( - resource_id=self.resource_id, **kwargs) - # Used when operating resources from other services - msg_fmt = _("Failed in %(op)s %(type)s '%(id)s': %(message)s.") - - -class ESchema(InternalError): - msg_fmt = _("%(message)s") - - -class InvalidPlugin(InternalError): - msg_fmt = _("%(message)s") - - -class PolicyNotAttached(InternalError): - msg_fmt = _("The policy '%(policy)s' is not attached to the specified " - "cluster '%(cluster)s'.") - - -class HTTPExceptionDisguise(Exception): - """Disguises HTTP exceptions. - - The purpose is to let them be handled by the webob fault application - in the wsgi pipeline. - """ - - def __init__(self, exception): - self.exc = exception - self.tb = sys.exc_info()[2] diff --git a/senlin/common/i18n.py b/senlin/common/i18n.py deleted file mode 100644 index a1e082353..000000000 --- a/senlin/common/i18n.py +++ /dev/null @@ -1,25 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# It's based on oslo.i18n usage in OpenStack Keystone project and -# recommendations from -# https://docs.openstack.org/oslo.i18n/latest/ - -import oslo_i18n - - -_translators = oslo_i18n.TranslatorFactory(domain='senlin') - -# The primary translation function using the well-known name "_" -_ = _translators.primary diff --git a/senlin/common/messaging.py b/senlin/common/messaging.py deleted file mode 100644 index bd864b941..000000000 --- a/senlin/common/messaging.py +++ /dev/null @@ -1,136 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -from oslo_config import cfg -import oslo_messaging as messaging -from osprofiler import profiler - -from senlin.common import consts -from senlin.common import context - -# An alias for the default serializer -JsonPayloadSerializer = messaging.JsonPayloadSerializer - -TRANSPORT = None -NOTIFICATION_TRANSPORT = None -NOTIFIER = None - - -class RequestContextSerializer(messaging.Serializer): - def __init__(self, base): - self._base = base - - def serialize_entity(self, ctxt, entity): - if not self._base: - return entity - return self._base.serialize_entity(ctxt, entity) - - def deserialize_entity(self, ctxt, entity): - if not self._base: - return entity - return self._base.deserialize_entity(ctxt, entity) - - @staticmethod - def serialize_context(ctxt): - _context = ctxt.to_dict() - prof = profiler.get() - if prof: - trace_info = { - "hmac_key": prof.hmac_key, - "base_id": prof.get_base_id(), - "parent_id": prof.get_id() - } - _context.update({"trace_info": trace_info}) - return _context - - @staticmethod - def deserialize_context(ctxt): - trace_info = ctxt.pop("trace_info", None) - if trace_info: - profiler.init(**trace_info) - return context.RequestContext.from_dict(ctxt) - - -def setup(url=None, optional=False): - """Initialise the oslo_messaging layer.""" - global TRANSPORT, GLOBAL_TRANSPORT, NOTIFIER - - if url and url.startswith("fake://"): - # NOTE: oslo_messaging fake driver uses time.sleep - # for task switch, so we need to monkey_patch it - eventlet.monkey_patch(time=True) - - messaging.set_transport_defaults('senlin') - if not TRANSPORT: - exmods = ['senlin.common.exception'] - try: - TRANSPORT = messaging.get_rpc_transport( - cfg.CONF, url, allowed_remote_exmods=exmods) - except messaging.InvalidTransportURL as e: - TRANSPORT = None - if not optional or e.url: - # NOTE: oslo_messaging is configured but unloadable - # so reraise the exception - raise - - if not NOTIFIER: - exmods = ['senlin.common.exception'] - try: - NOTIFICATION_TRANSPORT = messaging.get_notification_transport( - cfg.CONF, allowed_remote_exmods=exmods) - except Exception: - raise - - serializer = RequestContextSerializer(JsonPayloadSerializer()) - NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, - serializer=serializer, - topics=cfg.CONF.notification_topics) - - -def cleanup(): - """Cleanup the oslo_messaging layer.""" - global TRANSPORT, 
NOTIFICATION_TRANSPORT, NOTIFIER - if TRANSPORT: - TRANSPORT.cleanup() - TRANSPORT = None - NOTIFIER = None - if NOTIFICATION_TRANSPORT: - NOTIFICATION_TRANSPORT.cleanup() - NOTIFICATION_TRANSPORT = None - - -def get_rpc_server(target, endpoint, serializer=None): - """Return a configured oslo_messaging rpc server.""" - if serializer is None: - serializer = JsonPayloadSerializer() - serializer = RequestContextSerializer(serializer) - return messaging.get_rpc_server(TRANSPORT, target, [endpoint], - executor='eventlet', - serializer=serializer) - - -def get_rpc_client(topic, server, serializer=None): - """Return a configured oslo_messaging RPCClient.""" - target = messaging.Target(topic=topic, server=server, - version=consts.RPC_API_VERSION_BASE) - if serializer is None: - serializer = JsonPayloadSerializer() - serializer = RequestContextSerializer(serializer) - return messaging.get_rpc_client( - TRANSPORT, target, serializer=serializer) - - -def get_notifier(publisher_id): - """Return a configured oslo_messaging notifier.""" - global NOTIFIER - return NOTIFIER.prepare(publisher_id=publisher_id) diff --git a/senlin/common/policies/__init__.py b/senlin/common/policies/__init__.py deleted file mode 100644 index ecaeab2da..000000000 --- a/senlin/common/policies/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Borrowed from Zun - - -import itertools - -from senlin.common.policies import actions -from senlin.common.policies import base -from senlin.common.policies import build_info -from senlin.common.policies import cluster_policies -from senlin.common.policies import clusters -from senlin.common.policies import events -from senlin.common.policies import nodes -from senlin.common.policies import policies -from senlin.common.policies import policy_types -from senlin.common.policies import profile_types -from senlin.common.policies import profiles -from senlin.common.policies import receivers -from senlin.common.policies import services -from senlin.common.policies import webhooks - - -def list_rules(): - return itertools.chain( - base.list_rules(), - build_info.list_rules(), - profile_types.list_rules(), - policy_types.list_rules(), - clusters.list_rules(), - profiles.list_rules(), - nodes.list_rules(), - policies.list_rules(), - cluster_policies.list_rules(), - receivers.list_rules(), - actions.list_rules(), - events.list_rules(), - webhooks.list_rules(), - services.list_rules() - ) diff --git a/senlin/common/policies/actions.py b/senlin/common/policies/actions.py deleted file mode 100644 index 6de922b7c..000000000 --- a/senlin/common/policies/actions.py +++ /dev/null @@ -1,58 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="actions:index", - check_str=base.UNPROTECTED, - description="List actions", - operations=[ - { - 'path': '/v1/actions', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="actions:get", - check_str=base.UNPROTECTED, - description="Show action details", - operations=[ - { - 'path': '/v1/actions/{action_id}', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="actions:update", - check_str=base.UNPROTECTED, - description="Update action", - operations=[ - { - 'path': '/v1/actions/{action_id}', - 'method': 'PATCH' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/base.py b/senlin/common/policies/base.py deleted file mode 100644 index 8f57c706b..000000000 --- a/senlin/common/policies/base.py +++ /dev/null @@ -1,36 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - - -ROLE_ADMIN = 'role:admin' -DENY_EVERYBODY = '!' -UNPROTECTED = '' - -rules = [ - policy.RuleDefault( - name="context_is_admin", - check_str=ROLE_ADMIN - ), - policy.RuleDefault( - name="deny_everybody", - check_str=DENY_EVERYBODY - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/build_info.py b/senlin/common/policies/build_info.py deleted file mode 100644 index 1cba5a46c..000000000 --- a/senlin/common/policies/build_info.py +++ /dev/null @@ -1,36 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
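Each policy module removed here pairs a module-level rules list with a list_rules() function, and policies/__init__.py chains the per-module lists so that every default can be registered with the enforcer in one pass. A minimal sketch of that layout ('widgets' is a hypothetical resource):

    import itertools

    from oslo_policy import policy

    UNPROTECTED = ''

    rules = [
        policy.DocumentedRuleDefault(
            name="widgets:index",
            check_str=UNPROTECTED,
            description="List widgets",
            operations=[{'path': '/v1/widgets', 'method': 'GET'}],
        ),
    ]


    def list_rules():
        # Aggregators chain the module lists together, e.g.
        # itertools.chain(widgets.list_rules(), gadgets.list_rules())
        return itertools.chain(rules)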
- - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="build_info:build_info", - check_str=base.UNPROTECTED, - description="Show build information", - operations=[ - { - 'path': '/v1/build-info', - 'method': 'GET' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/cluster_policies.py b/senlin/common/policies/cluster_policies.py deleted file mode 100644 index 84ba0a685..000000000 --- a/senlin/common/policies/cluster_policies.py +++ /dev/null @@ -1,80 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="cluster_policies:index", - check_str=base.UNPROTECTED, - description="List cluster policies", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}/policies', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="cluster_policies:attach", - check_str=base.UNPROTECTED, - description="Attach a Policy to a Cluster", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}/actions', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="cluster_policies:detach", - check_str=base.UNPROTECTED, - description="Detach a Policy from a Cluster", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}/actions', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="cluster_policies:update", - check_str=base.UNPROTECTED, - description="Update a Policy on a Cluster", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}/actions', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="cluster_policies:get", - check_str=base.UNPROTECTED, - description="Show cluster_policy details", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}/policies/{policy_id}', - 'method': 'GET' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/clusters.py b/senlin/common/policies/clusters.py deleted file mode 100644 index c8af62c91..000000000 --- a/senlin/common/policies/clusters.py +++ /dev/null @@ -1,113 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="clusters:index", - check_str=base.UNPROTECTED, - description="List clusters", - operations=[ - { - 'path': '/v1/clusters', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="clusters:create", - check_str=base.UNPROTECTED, - description="Create cluster", - operations=[ - { - 'path': '/v1/clusters', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="clusters:delete", - check_str=base.UNPROTECTED, - description="Delete cluster", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}', - 'method': 'DELETE' - } - ] - ), - policy.DocumentedRuleDefault( - name="clusters:get", - check_str=base.UNPROTECTED, - description="Show cluster details", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="clusters:action", - check_str=base.UNPROTECTED, - description="Perform specified action on a cluster.", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}/actions', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="clusters:update", - check_str=base.UNPROTECTED, - description="Update cluster", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}', - 'method': 'PATCH' - } - ] - ), - policy.DocumentedRuleDefault( - name="clusters:collect", - check_str=base.UNPROTECTED, - description="Collect Attributes Across a Cluster", - operations=[ - { - 'path': 'v1/clusters/{cluster_id}/attrs/{path}', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="clusters:operation", - check_str=base.UNPROTECTED, - description="Perform an Operation on a Cluster", - operations=[ - { - 'path': '/v1/clusters/{cluster_id}/ops', - 'method': 'POST' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/events.py b/senlin/common/policies/events.py deleted file mode 100644 index b2fe54542..000000000 --- a/senlin/common/policies/events.py +++ /dev/null @@ -1,47 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="events:index", - check_str=base.UNPROTECTED, - description="List events", - operations=[ - { - 'path': '/v1/events', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="events:get", - check_str=base.UNPROTECTED, - description="Show event details", - operations=[ - { - 'path': '/v1/events/{event_id}', - 'method': 'GET' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/nodes.py b/senlin/common/policies/nodes.py deleted file mode 100644 index 29b77ac22..000000000 --- a/senlin/common/policies/nodes.py +++ /dev/null @@ -1,124 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="nodes:index", - check_str=base.UNPROTECTED, - description="List nodes", - operations=[ - { - 'path': '/v1/nodes', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="nodes:create", - check_str=base.UNPROTECTED, - description="Create node", - operations=[ - { - 'path': '/v1/nodes', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="nodes:adopt", - check_str=base.UNPROTECTED, - description="Adopt node", - operations=[ - { - 'path': '/v1/nodes/adopt', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="nodes:adopt_preview", - check_str=base.UNPROTECTED, - description="Adopt node (preview)", - operations=[ - { - 'path': '/v1/nodes/adopt-preview', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="nodes:get", - check_str=base.UNPROTECTED, - description="Show node details", - operations=[ - { - 'path': '/v1/nodes/{node_id}', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="nodes:action", - check_str=base.UNPROTECTED, - description="Perform specified action on a Node.", - operations=[ - { - 'path': '/v1/nodes/{node_id}/actions', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="nodes:update", - check_str=base.UNPROTECTED, - description="Update node", - operations=[ - { - 'path': '/v1/nodes/{node_id}', - 'method': 'PATCH' - } - ] - ), - policy.DocumentedRuleDefault( - name="nodes:delete", - check_str=base.UNPROTECTED, - description="Delete node", - operations=[ - { - 'path': '/v1/nodes/{node_id}', - 'method': 'DELETE' - } - ] - ), - policy.DocumentedRuleDefault( - name="nodes:operation", - check_str=base.UNPROTECTED, - description="Perform an Operation on a Node", - operations=[ - { - 'path': '/v1/nodes/{node_id}/ops', - 'method': 'POST' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/policies.py b/senlin/common/policies/policies.py deleted file mode 100644 index d4b12c8b0..000000000 --- a/senlin/common/policies/policies.py +++ /dev/null @@ -1,91 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="policies:index", - check_str=base.UNPROTECTED, - description="List policies", - operations=[ - { - 'path': '/v1/policies', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="policies:create", - check_str=base.UNPROTECTED, - description="Create policy", - operations=[ - { - 'path': '/v1/policies', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="policies:get", - check_str=base.UNPROTECTED, - description="Show policy details", - operations=[ - { - 'path': '/v1/policies/{policy_id}', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="policies:update", - check_str=base.UNPROTECTED, - description="Update policy", - operations=[ - { - 'path': '/v1/policies/{policy_id}', - 'method': 'PATCH' - } - ] - ), - policy.DocumentedRuleDefault( - name="policies:delete", - check_str=base.UNPROTECTED, - description="Delete policy", - operations=[ - { - 'path': '/v1/policies/{policy_id}', - 'method': 'DELETE' - } - ] - ), - policy.DocumentedRuleDefault( - name="policies:validate", - check_str=base.UNPROTECTED, - description="Validate policy.", - operations=[ - { - 'path': '/v1/policies/validate', - 'method': 'POST' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/policy_types.py b/senlin/common/policies/policy_types.py deleted file mode 100644 index 633aa93eb..000000000 --- a/senlin/common/policies/policy_types.py +++ /dev/null @@ -1,47 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="policy_types:index", - check_str=base.UNPROTECTED, - description="List policy types", - operations=[ - { - 'path': '/v1/policy-types', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="policy_types:get", - check_str=base.UNPROTECTED, - description="Show policy type details", - operations=[ - { - 'path': '/v1/policy-types/{policy_type}', - 'method': 'GET' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/profile_types.py b/senlin/common/policies/profile_types.py deleted file mode 100644 index 4de450924..000000000 --- a/senlin/common/policies/profile_types.py +++ /dev/null @@ -1,58 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="profile_types:index", - check_str=base.UNPROTECTED, - description="List profile types", - operations=[ - { - 'path': '/v1/profile-types', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="profile_types:get", - check_str=base.UNPROTECTED, - description="Show profile type details", - operations=[ - { - 'path': '/v1/profile-types/{profile_type}', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="profile_types:ops", - check_str=base.UNPROTECTED, - description="List profile type operations", - operations=[ - { - 'path': '/v1/profile-types/{profile_type}/ops', - 'method': 'GET' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/profiles.py b/senlin/common/policies/profiles.py deleted file mode 100644 index ed6bb2177..000000000 --- a/senlin/common/policies/profiles.py +++ /dev/null @@ -1,91 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="profiles:index", - check_str=base.UNPROTECTED, - description="List profiles", - operations=[ - { - 'path': '/v1/profiles', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="profiles:create", - check_str=base.UNPROTECTED, - description="Create profile", - operations=[ - { - 'path': '/v1/profiles', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="profiles:get", - check_str=base.UNPROTECTED, - description="Show profile details", - operations=[ - { - 'path': '/v1/profiles/{profile_id}', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="profiles:delete", - check_str=base.UNPROTECTED, - description="Delete profile", - operations=[ - { - 'path': '/v1/profiles/{profile_id}', - 'method': 'DELETE' - } - ] - ), - policy.DocumentedRuleDefault( - name="profiles:update", - check_str=base.UNPROTECTED, - description="Update profile", - operations=[ - { - 'path': '/v1/profiles/{profile_id}', - 'method': 'PATCH' - } - ] - ), - policy.DocumentedRuleDefault( - name="profiles:validate", - check_str=base.UNPROTECTED, - description="Validate profile", - operations=[ - { - 'path': '/v1/profiles/validate', - 'method': 'POST' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/receivers.py b/senlin/common/policies/receivers.py deleted file mode 100644 index f69412bcd..000000000 --- a/senlin/common/policies/receivers.py +++ /dev/null @@ -1,91 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="receivers:index", - check_str=base.UNPROTECTED, - description="List receivers", - operations=[ - { - 'path': '/v1/receivers', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="receivers:create", - check_str=base.UNPROTECTED, - description="Create receiver", - operations=[ - { - 'path': '/v1/receivers', - 'method': 'POST' - } - ] - ), - policy.DocumentedRuleDefault( - name="receivers:get", - check_str=base.UNPROTECTED, - description="Show receiver details", - operations=[ - { - 'path': '/v1/receivers/{receiver_id}', - 'method': 'GET' - } - ] - ), - policy.DocumentedRuleDefault( - name="receivers:update", - check_str=base.UNPROTECTED, - description="Update receiver", - operations=[ - { - 'path': '/v1/receivers/{receiver_id}', - 'method': 'PATCH' - } - ] - ), - policy.DocumentedRuleDefault( - name="receivers:delete", - check_str=base.UNPROTECTED, - description="Delete receiver", - operations=[ - { - 'path': '/v1/receivers/{receiver_id}', - 'method': 'DELETE' - } - ] - ), - policy.DocumentedRuleDefault( - name="receivers:notify", - check_str=base.UNPROTECTED, - description="Notify receiver", - operations=[ - { - 'path': '/v1/receivers/{receiver_id}/notify', - 'method': 'POST' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/services.py b/senlin/common/policies/services.py deleted file mode 100644 index c708d86c8..000000000 --- a/senlin/common/policies/services.py +++ /dev/null @@ -1,36 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="services:index", - check_str=base.ROLE_ADMIN, - description="List services", - operations=[ - { - 'path': '/v1/services', - 'method': 'GET' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policies/webhooks.py b/senlin/common/policies/webhooks.py deleted file mode 100644 index cc708b94f..000000000 --- a/senlin/common/policies/webhooks.py +++ /dev/null @@ -1,36 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from senlin.common.policies import base - -rules = [ - policy.DocumentedRuleDefault( - name="webhooks:trigger", - check_str=base.UNPROTECTED, - description="Trigger webhook action", - operations=[ - { - 'path': '/v1/webhooks/{webhook_id}/trigger', - 'method': 'POST' - } - ] - ) -] - - -def list_rules(): - return rules diff --git a/senlin/common/policy.py b/senlin/common/policy.py deleted file mode 100644 index 5f5a4b683..000000000 --- a/senlin/common/policy.py +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Policy Engine For Senlin -""" - -# from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_policy import opts -from oslo_policy import policy - -from senlin.common import exception -from senlin.common import policies - -POLICY_ENFORCER = None -CONF = cfg.CONF - -# TODO(gmann): Remove setting the default value of config policy_file -# once oslo_policy change the default value to 'policy.yaml'. -# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49 -DEFAULT_POLICY_FILE = 'policy.yaml' -opts.set_defaults(CONF, DEFAULT_POLICY_FILE) - - -# @lockutils.synchronized('policy_enforcer', 'senlin-') -def _get_enforcer(policy_file=None, rules=None, default_rule=None): - - global POLICY_ENFORCER - - if POLICY_ENFORCER is None: - POLICY_ENFORCER = policy.Enforcer(CONF, - policy_file=policy_file, - rules=rules, - default_rule=default_rule) - POLICY_ENFORCER.register_defaults(policies.list_rules()) - return POLICY_ENFORCER - - -def enforce(context, rule, target, do_raise=True, *args, **kwargs): - - enforcer = _get_enforcer() - credentials = context.to_dict() - target = target or {} - if do_raise: - kwargs.update(exc=exception.Forbidden) - - return enforcer.enforce(rule, target, credentials, do_raise, - *args, **kwargs) diff --git a/senlin/common/profiler.py b/senlin/common/profiler.py deleted file mode 100644 index abb3e7443..000000000 --- a/senlin/common/profiler.py +++ /dev/null @@ -1,45 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
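The `enforce()` wrapper above delegates to a lazily created oslo.policy Enforcer seeded with the documented defaults. A minimal standalone sketch of the same flow; the rule name and credentials below are illustrative, and an empty check string reproduces the effect of base.UNPROTECTED:

from oslo_config import cfg
from oslo_policy import policy

CONF = cfg.CONF
CONF([], project='senlin')  # parse no CLI args so the enforcer can read config

enforcer = policy.Enforcer(CONF)
enforcer.register_defaults([
    policy.DocumentedRuleDefault(
        name='webhooks:trigger',
        check_str='',  # empty check string: always allowed (UNPROTECTED)
        description='Trigger webhook action',
        operations=[{'path': '/v1/webhooks/{webhook_id}/trigger',
                     'method': 'POST'}],
    ),
])

credentials = {'user_id': 'u1', 'project_id': 'p1', 'roles': ['member']}
print(enforcer.enforce('webhooks:trigger', {}, credentials, do_raise=False))
# True: the registered default carries no restriction
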
- -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging -import osprofiler.profiler -import osprofiler.web - -from senlin.common import context -from senlin.common import messaging - -cfg.CONF.import_opt('enabled', 'senlin.conf', group='profiler') - -LOG = logging.getLogger(__name__) - - -def setup(binary, host): - if cfg.CONF.profiler.enabled: - _notifier = osprofiler.notifier.create( - "Messaging", oslo_messaging, context.get_admin_context().to_dict(), - messaging.TRANSPORT, "senlin", binary, host) - osprofiler.notifier.set(_notifier) - osprofiler.web.enable(cfg.CONF.profiler.hmac_keys) - LOG.warning("OSProfiler is enabled.\nIt means that any person who " - "knows any of hmac_keys that are specified in " - "/etc/senlin/senlin.conf can trace his requests. \n" - "In real life only an operator can read this file so " - "there is no security issue. Note that even if any " - "person can trigger the profiler, only an admin user " - "can retrieve trace.\n" - "To disable OSProfiler set in senlin.conf:\n" - "[profiler]\nenabled=false") - else: - osprofiler.web.disable() diff --git a/senlin/common/scaleutils.py b/senlin/common/scaleutils.py deleted file mode 100644 index c72462cc6..000000000 --- a/senlin/common/scaleutils.py +++ /dev/null @@ -1,328 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Utilities for scaling actions and related policies. -""" - -import math -import random - -from oslo_config import cfg -from oslo_log import log as logging - -from senlin.common import consts -from senlin.common.i18n import _ - -LOG = logging.getLogger(__name__) - - -def calculate_desired(current, adj_type, number, min_step): - """Calculate desired capacity based on the type and number values. - - :param current: Current capacity of the cluster. - :param adj_type: Type of adjustment. - :param number: Number for the corresponding adjustment type. - :param min_step: Minimum number of nodes to create/delete. - :returns: A number representing the desired capacity. - """ - if adj_type == consts.EXACT_CAPACITY: - desired = number - elif adj_type == consts.CHANGE_IN_CAPACITY: - desired = current + number - else: # consts.CHANGE_IN_PERCENTAGE: - delta = (number * current) / 100.0 - if delta > 0.0: - rounded = int(math.ceil(delta) if math.fabs(delta) < 1.0 - else math.floor(delta)) - else: - rounded = int(math.floor(delta) if math.fabs(delta) < 1.0 - else math.ceil(delta)) - - if min_step is not None and min_step > abs(rounded): - adjust = min_step if rounded > 0 else -min_step - desired = current + adjust - else: - desired = current + rounded - - return desired - - -def truncate_desired(cluster, desired, min_size, max_size): - """Do truncation of desired capacity for non-strict cases. - - :param cluster: The target cluster. - :param desired: The expected capacity of the cluster. - :param min_size: The NEW minimum capacity set for the cluster. - :param max_size: The NEW maximum capacity set for the cluster. 
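Worked examples of the `calculate_desired()` arithmetic above, assuming the function and the adjustment-type constants were still importable:

from senlin.common import consts
from senlin.common.scaleutils import calculate_desired

calculate_desired(10, consts.EXACT_CAPACITY, 6, None)        # -> 6
calculate_desired(10, consts.CHANGE_IN_CAPACITY, -2, None)   # -> 8
calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 15, None) # delta=1.5,
                                                             # floored -> 11
calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 5, 2)     # delta=0.5 rounds
                                                             # up to 1, but
                                                             # min_step=2 wins -> 12
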
- """ - - if min_size is not None and desired < min_size: - desired = min_size - LOG.debug("Truncating shrinkage to specified min_size (%s).", - desired) - - if min_size is None and desired < cluster.min_size: - desired = cluster.min_size - LOG.debug("Truncating shrinkage to cluster's min_size (%s).", - desired) - - if max_size is not None and max_size > 0 and desired > max_size: - desired = max_size - LOG.debug("Truncating growth to specified max_size (%s).", - desired) - - if (max_size is None and desired > cluster.max_size and - cluster.max_size > 0): - desired = cluster.max_size - LOG.debug("Truncating growth to cluster's max_size (%s).", - desired) - - return desired - - -def check_size_params(cluster=None, desired=None, min_size=None, max_size=None, - strict=False): - """Validate provided arguments against cluster properties. - - Sanity Checking 1: the desired, min_size, max_size parameters must - form a reasonable relationship among themselves, - if specified. - Sanity Checking 2: the desired_capacity must be within the existing - range of the cluster, if new range is not provided. - - :param cluster: The cluster object if provided. - :param desired: The desired capacity for an operation if provided. - :param min_size: The new min_size property for the cluster, if provided. - :param max_size: The new max_size property for the cluster, if provided. - :param strict: Whether we are doing a strict checking. - - :return: A string of error message if failed checking or None if passed - the checking. - """ - - max_nodes_per_cluster = cfg.CONF.max_nodes_per_cluster - if desired is not None: - # recalculate/validate desired based on strict setting - if desired > max_nodes_per_cluster: - v = {'d': desired, 'm': max_nodes_per_cluster} - return _("The target capacity (%(d)s) is greater than the " - "maximum number of nodes allowed per cluster " - "(%(m)s).") % v - if (min_size is not None and desired < min_size): - v = {'d': desired, 'm': min_size} - return _("The target capacity (%(d)s) is less than " - "the specified min_size (%(m)s).") % v - - if (min_size is None and cluster is not None and - desired < cluster.min_size and strict): - v = {'d': desired, 'm': cluster.min_size} - return _("The target capacity (%(d)s) is less than " - "the cluster's min_size (%(m)s).") % v - - if (max_size is not None and desired > max_size and - max_size >= 0): - v = {'d': desired, 'm': max_size} - return _("The target capacity (%(d)s) is greater " - "than the specified max_size (%(m)s).") % v - - if (max_size is None and cluster is not None and - desired > cluster.max_size and - cluster.max_size >= 0 and strict): - v = {'d': desired, 'm': cluster.max_size} - return _("The target capacity (%(d)s) is greater " - "than the cluster's max_size (%(m)s).") % v - - if min_size is not None: - if max_size is not None and max_size >= 0 and min_size > max_size: - v = {'n': min_size, 'm': max_size} - return _("The specified min_size (%(n)s) is greater than the " - "specified max_size (%(m)s).") % v - - if (max_size is None and cluster is not None and - cluster.max_size >= 0 and min_size > cluster.max_size): - v = {'n': min_size, 'm': cluster.max_size} - return _("The specified min_size (%(n)s) is greater than the " - "current max_size (%(m)s) of the cluster.") % v - - if (desired is None and cluster is not None and - min_size > cluster.desired_capacity and strict): - v = {'n': min_size, 'd': cluster.desired_capacity} - return _("The specified min_size (%(n)s) is greater than the " - "current desired_capacity (%(d)s) of 
the cluster.") % v - - if max_size is not None: - if max_size > max_nodes_per_cluster: - v = {'m': max_size, 'mc': max_nodes_per_cluster} - return _("The specified max_size (%(m)s) is greater than the " - "maximum number of nodes allowed per cluster " - "(%(mc)s).") % v - if (min_size is None and cluster is not None and - max_size >= 0 and max_size < cluster.min_size): - v = {'m': max_size, 'n': cluster.min_size} - return _("The specified max_size (%(m)s) is less than the " - "current min_size (%(n)s) of the cluster.") % v - - if (desired is None and cluster is not None and - max_size >= 0 and max_size < cluster.desired_capacity and - strict): - v = {'m': max_size, 'd': cluster.desired_capacity} - return _("The specified max_size (%(m)s) is less than the " - "current desired_capacity (%(d)s) of the cluster.") % v - - return None - - -def parse_resize_params(action, cluster, current=None): - """Parse the parameters of CLUSTER_RESIZE action. - - :param action: The current action which contains some inputs for parsing. - :param cluster: The target cluster to operate. - :param current: The current capacity of the cluster. - :returns: A tuple containing a flag and a message. In the case of a - success, the flag should be action.RES_OK and the message can be - ignored. The action.data will contain a dict indicating the - operation and parameters for further processing. In the case of - a failure, the flag should be action.RES_ERROR and the message - will contain a string message indicating the reason of failure. - """ - - adj_type = action.inputs.get(consts.ADJUSTMENT_TYPE, None) - number = action.inputs.get(consts.ADJUSTMENT_NUMBER, None) - min_size = action.inputs.get(consts.ADJUSTMENT_MIN_SIZE, None) - max_size = action.inputs.get(consts.ADJUSTMENT_MAX_SIZE, None) - min_step = action.inputs.get(consts.ADJUSTMENT_MIN_STEP, None) - strict = action.inputs.get(consts.ADJUSTMENT_STRICT, False) - - current = current or cluster.desired_capacity - if adj_type is not None: - # number must be not None according to previous tests - desired = calculate_desired(current, adj_type, number, min_step) - else: - desired = current - - # truncate adjustment if permitted (strict==False) - if strict is False: - desired = truncate_desired(cluster, desired, min_size, max_size) - - # check provided params against current properties - # desired is checked when strict is True - result = check_size_params(cluster, desired, min_size, max_size, strict) - if result: - return action.RES_ERROR, result - - # save sanitized properties - count = current - desired - if count > 0: - action.data.update({ - 'deletion': { - 'count': count, - } - }) - else: - action.data.update({ - 'creation': { - 'count': abs(count), - } - }) - - return action.RES_OK, '' - - -def filter_error_nodes(nodes): - """Filter out ERROR nodes from the given node list. - - :param nodes: candidate nodes for filter. - :return: a tuple containing the chosen nodes' IDs and the undecided - (good) nodes. - """ - good = [] - bad = [] - not_created = [] - for n in nodes: - if (n.status == consts.NS_ERROR or n.status == consts.NS_WARNING or - n.tainted): - bad.append(n.id) - elif n.created_at is None: - not_created.append(n.id) - else: - good.append(n) - - bad.extend(not_created) - return bad, good - - -def nodes_by_random(nodes, count): - """Select nodes based on random number. - - :param nodes: list of candidate nodes. - :param count: maximum number of nodes for selection. - :return: a list of IDs for victim nodes. 
- """ - selected, candidates = filter_error_nodes(nodes) - if count <= len(selected): - return selected[:count] - - count -= len(selected) - random.seed() - - i = count - while i > 0: - rand = random.randrange(len(candidates)) - selected.append(candidates[rand].id) - candidates.remove(candidates[rand]) - i = i - 1 - - return selected - - -def nodes_by_age(nodes, count, old_first): - """Select nodes based on node creation time. - - :param nodes: list of candidate nodes. - :param count: maximum number of nodes for selection. - :param old_first: whether old nodes should appear before young ones. - :return: a list of IDs for victim nodes. - """ - selected, candidates = filter_error_nodes(nodes) - if count <= len(selected): - return selected[:count] - - count -= len(selected) - sorted_list = sorted(candidates, key=lambda r: r.created_at) - for i in range(count): - if old_first: - selected.append(sorted_list[i].id) - else: # YOUNGEST_FIRST - selected.append(sorted_list[-1 - i].id) - return selected - - -def nodes_by_profile_age(nodes, count): - """Select nodes based on node profile creation time. - - Note that old nodes will come before young ones. - - :param nodes: list of candidate nodes. - :param count: maximum number of nodes for selection. - :return: a list of IDs for victim nodes. - """ - selected, candidates = filter_error_nodes(nodes) - if count <= len(selected): - return selected[:count] - - count -= len(selected) - sorted_list = sorted(candidates, key=lambda n: n.profile_created_at) - for i in range(count): - selected.append(sorted_list[i].id) - - return selected diff --git a/senlin/common/schema.py b/senlin/common/schema.py deleted file mode 100644 index 0308d3f2d..000000000 --- a/senlin/common/schema.py +++ /dev/null @@ -1,536 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import numbers - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import strutils - -from senlin.common import exception as exc -from senlin.common.i18n import _ - -LOG = logging.getLogger(__name__) - - -class AnyIndexDict(collections.abc.Mapping): - """Convenience schema for a list.""" - - def __init__(self, value): - self.value = value - - def __getitem__(self, key): - if key != '*' and not isinstance(key, int): - raise KeyError("Invalid key %s" % str(key)) - - return self.value - - def __iter__(self): - yield '*' - - def __len__(self): - return 1 - - -class SchemaBase(collections.abc.Mapping): - """Class for validating property or operation schemas.""" - - KEYS = ( - TYPE, DESCRIPTION, DEFAULT, REQUIRED, SCHEMA, CONSTRAINTS, - MIN_VERSION, MAX_VERSION, - ) = ( - 'type', 'description', 'default', 'required', 'schema', 'constraints', - 'min_version', 'max_version', - ) - - TYPES = ( - INTEGER, STRING, NUMBER, BOOLEAN, MAP, LIST, - ) = ( - 'Integer', 'String', 'Number', 'Boolean', 'Map', 'List', - ) - - def __init__(self, description=None, default=None, required=False, - schema=None, constraints=None, min_version=None, - max_version=None): - if schema is not None: - if type(self) not in (List, Map, Operation): - msg = _('Schema valid only for List or Map, not %s' - ) % self[self.TYPE] - raise exc.ESchema(message=msg) - - if self[self.TYPE] == self.LIST: - self.schema = AnyIndexDict(schema) - else: - self.schema = schema - - self.description = description - self.default = default - self.required = required - self.constraints = constraints or [] - self._len = None - self.min_version = min_version - self.max_version = max_version - - def has_default(self): - return self.default is not None - - def get_default(self): - return self.resolve(self.default) - - def _validate_default(self, context): - if self.default is None: - return - - try: - # NOTE: this is the subclass's version of 'validate' - self.validate(self.default, context) - except (ValueError, TypeError) as ex: - msg = _('Invalid default %(default)s: %(exc)s' - ) % dict(default=self.default, exc=ex) - raise exc.ESchema(message=msg) - - def validate_constraints(self, value, schema=None, context=None): - try: - for constraint in self.constraints: - constraint.validate(value, schema=schema, context=context) - except ValueError as ex: - raise exc.ESchema(message=str(ex)) - - def _validate_version(self, key, version): - if self.min_version and self.min_version > version: - msg = _('%(key)s (min_version=%(min)s) is not supported by ' - 'spec version %(version)s.' - ) % {'key': key, 'min': self.min_version, - 'version': version} - raise exc.ESchema(message=msg) - if self.max_version: - if version > self.max_version: - msg = _('%(key)s (max_version=%(max)s) is not supported ' - 'by spec version %(version)s.' 
- ) % {'version': version, 'max': self.max_version, - 'key': key} - raise exc.ESchema(message=msg) - else: - LOG.warning('Warning: %(key)s will be deprecated after ' - 'version %(version)s!', - {'key': key, 'version': self.max_version}) - - def __getitem__(self, key): - if key == self.DESCRIPTION: - if self.description is not None: - return self.description - elif key == self.DEFAULT: - if self.default is not None: - return self.default - elif key == self.SCHEMA: - if self.schema is not None: - return dict((n, dict(s)) for n, s in self.schema.items()) - elif key == self.REQUIRED: - return self.required - elif key == self.CONSTRAINTS: - if self.constraints: - return [dict(c) for c in self.constraints] - - raise KeyError(key) - - def __iter__(self): - for k in self.KEYS: - try: - self[k] - except KeyError: - pass - else: - yield k - - def __len__(self): - if self._len is None: - self._len = len(list(iter(self))) - return self._len - - -class PropertySchema(SchemaBase): - """Class for validating profile and policy specifications.""" - - KEYS = ( - TYPE, DESCRIPTION, DEFAULT, REQUIRED, SCHEMA, UPDATABLE, - CONSTRAINTS, MIN_VERSION, MAX_VERSION, - ) = ( - 'type', 'description', 'default', 'required', 'schema', 'updatable', - 'constraints', 'min_version', 'max_version', - ) - - def __init__(self, description=None, default=None, required=False, - schema=None, updatable=False, constraints=None, - min_version=None, max_version=None): - super(PropertySchema, self).__init__(description=description, - default=default, - required=required, schema=schema, - constraints=constraints, - min_version=min_version, - max_version=max_version) - self.updatable = updatable - - def __getitem__(self, key): - # NOTE: UPDATABLE is only applicable to some specs which may be - # eligible for an update operation later - if key == self.UPDATABLE: - return self.updatable - - return super(PropertySchema, self).__getitem__(key) - - -class Boolean(PropertySchema): - - def __getitem__(self, key): - if key == self.TYPE: - return self.BOOLEAN - return super(Boolean, self).__getitem__(key) - - def to_schema_type(self, value): - try: - return strutils.bool_from_string(str(value), strict=True) - except ValueError: - msg = _("The value '%s' is not a valid Boolean") % value - raise exc.ESchema(message=msg) - - def resolve(self, value): - return self.to_schema_type(value) - - def validate(self, value, context=None): - if isinstance(value, bool): - return - - self.resolve(value) - - -class Integer(PropertySchema): - - def __getitem__(self, key): - if key == self.TYPE: - return self.INTEGER - return super(Integer, self).__getitem__(key) - - def to_schema_type(self, value): - if value is None: - return None - - if isinstance(value, int): - return value - - try: - num = int(value) - except ValueError: - msg = _("The value '%s' is not a valid Integer") % value - raise exc.ESchema(message=msg) - - return num - - def resolve(self, value): - return self.to_schema_type(value) - - def validate(self, value, context=None): - if not isinstance(value, int): - value = self.resolve(value) - - self.validate_constraints(value, schema=self, context=context) - - -class String(PropertySchema): - - def __getitem__(self, key): - if key == self.TYPE: - return self.STRING - return super(String, self).__getitem__(key) - - def to_schema_type(self, value): - try: - if isinstance(value, str): - return value - return str(value) if value is not None else None - except Exception: - raise - - def resolve(self, value): - return self.to_schema_type(value) - - def 
validate(self, value, context=None): - if value is None: - msg = _("The value '%s' is not a valid string.") % value - raise exc.ESchema(message=msg) - - self.resolve(value) - self.validate_constraints(value, schema=self, context=context) - - -class Number(PropertySchema): - - def __getitem__(self, key): - if key == self.TYPE: - return self.NUMBER - return super(Number, self).__getitem__(key) - - def to_schema_type(self, value): - if isinstance(value, numbers.Number): - return value - - try: - return int(value) - except ValueError: - try: - return float(value) - except ValueError: - msg = _("The value '%s' is not a valid number.") % value - raise exc.ESchema(message=msg) - - def resolve(self, value): - return self.to_schema_type(value) - - def validate(self, value, context=None): - if not isinstance(value, numbers.Number): - value = self.resolve(value) - - self.validate_constraints(value, schema=self, context=context) - - -class List(PropertySchema): - - def __getitem__(self, key): - if key == self.TYPE: - return self.LIST - return super(List, self).__getitem__(key) - - def _get_children(self, values, context=None): - res = [] - for i in range(len(values)): - res.append(self.schema[i].resolve(values[i])) - return res - - def resolve(self, value, context=None): - if not isinstance(value, collections.abc.Sequence): - raise TypeError(_('"%s" is not a List') % value) - - return [v for v in self._get_children(value, context=context)] - - def validate(self, value, context=None): - # if not isinstance(value, collections.abc.Mapping): - if not isinstance(value, collections.abc.Sequence): - msg = _("'%s' is not a List") % value - raise exc.ESchema(message=msg) - - for v in value: - self.schema['*'].validate(v, context=context) - - -class Map(PropertySchema): - - def __getitem__(self, key): - if key == self.TYPE: - return self.MAP - return super(Map, self).__getitem__(key) - - def _get_children(self, values, context=None): - # There are cases where the Map is not specified to the very - # detailed levels, we treat them as valid specs as well. - if self.schema is None: - return values - - sub_schema = self.schema - if sub_schema is not None: - # sub_schema should be a dict here. 
- subspec = Spec(sub_schema, dict(values)) - subspec.validate() - - return ((k, subspec[k]) for k in sub_schema) - else: - return values - - def get_default(self): - if self.default is None: - return {} - - if not isinstance(self.default, collections.abc.Mapping): - msg = _("'%s' is not a Map") % self.default - raise exc.ESchema(message=msg) - - return self.default - - def resolve(self, value, context=None): - if isinstance(value, str): - try: - value = jsonutils.loads(value) - except (TypeError, ValueError): - msg = _("'%s' is not a Map") % value - raise exc.ESchema(message=msg) - - if not isinstance(value, collections.abc.Mapping): - msg = _("'%s' is not a Map") % value - raise exc.ESchema(message=msg) - - return dict(self._get_children(value.items(), context)) - - def validate(self, value, context=None): - if not isinstance(value, collections.abc.Mapping): - msg = _("'%s' is not a Map") % value - raise exc.ESchema(message=msg) - - if not self.schema: - return - - for key, child in self.schema.items(): - item_value = value.get(key) - if item_value: - child.validate(item_value, context) - - -class StringParam(SchemaBase): - - def __getitem__(self, key): - if key == self.TYPE: - return self.STRING - return super(StringParam, self).__getitem__(key) - - def validate(self, value): - if not isinstance(value, str): - raise TypeError("value is not a string") - - self.validate_constraints(value) - - -class IntegerParam(SchemaBase): - - def __getitem__(self, key): - if key == self.TYPE: - return self.INTEGER - return super(IntegerParam, self).__getitem__(key) - - def validate(self, value): - try: - int(value) - except ValueError: - msg = _("The value '%s' is not a valid Integer") % value - raise ValueError(msg) - - self.validate_constraints(value) - - -class Operation(SchemaBase): - """Class for specifying operations on profiles.""" - - KEYS = ( - DESCRIPTION, PARAMETERS, - ) = ( - 'description', 'parameters', - ) - - def __getitem__(self, key): - if key == self.DESCRIPTION: - return self.description or "Undocumented" - elif key == self.PARAMETERS: - if self.schema is None: - return {} - return dict((n, dict(s)) for n, s in self.schema.items()) - - def validate(self, data, version=None): - for k in data: - if k not in self.schema: - msg = _("Unrecognizable parameter '%s'") % k - raise exc.ESchema(message=msg) - - for (k, s) in self.schema.items(): - try: - if k in data: - s.validate(data[k]) - elif s.required: - msg = _("Required parameter '%s' not provided") % k - raise exc.ESchema(message=msg) - - if version: - s._validate_version(k, version) - except (TypeError, ValueError) as ex: - raise exc.ESchema(message=str(ex)) - - -class Spec(collections.abc.Mapping): - """A class that contains all spec items.""" - - def __init__(self, schema, data, version=None): - self._schema = schema - self._data = data - self._version = version - - def validate(self): - """Validate the schema.""" - - for (k, s) in self._schema.items(): - try: - # Validate through resolve - self.resolve_value(k) - - # Validate schema for version - if self._version: - self._schema[k]._validate_version(k, self._version) - except (TypeError, ValueError) as err: - raise exc.ESchema(message=str(err)) - - for key in self._data: - if key not in self._schema: - msg = _("Unrecognizable spec item '%s'") % key - raise exc.ESchema(message=msg) - - def resolve_value(self, key): - if key not in self: - raise exc.ESchema(message="Invalid spec item: %s" % key) - - schema_item = self._schema[key] - if key in self._data: - raw_value = self._data[key] 
- schema_item.validate(raw_value) - return schema_item.resolve(raw_value) - elif schema_item.has_default(): - return schema_item.get_default() - elif schema_item.required: - msg = _("Required spec item '%s' not provided") % key - raise exc.ESchema(message=msg) - - def __getitem__(self, key): - """Lazy evaluation for spec items.""" - return self.resolve_value(key) - - def __len__(self): - """Number of items in the spec. - - A spec always contain all keys though some may be not specified. - """ - return len(self._schema) - - def __contains__(self, key): - return key in self._schema - - def __iter__(self): - return iter(self._schema) - - -def get_spec_version(spec): - if not isinstance(spec, dict): - msg = _('The provided spec is not a map.') - raise exc.ESchema(message=msg) - - if 'type' not in spec: - msg = _("The 'type' key is missing from the provided spec map.") - raise exc.ESchema(message=msg) - - if 'version' not in spec: - msg = _("The 'version' key is missing from the provided spec map.") - raise exc.ESchema(message=msg) - - return spec['type'], str(spec['version']) diff --git a/senlin/common/service.py b/senlin/common/service.py deleted file mode 100644 index 34a9bf756..000000000 --- a/senlin/common/service.py +++ /dev/null @@ -1,121 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_log import log as logging -from oslo_service import service -from oslo_utils import uuidutils - -from senlin.common import context as senlin_context -import senlin.conf -from senlin.objects import service as service_obj -from senlin import version - -CONF = senlin.conf.CONF -LOG = logging.getLogger(__name__) - - -class Service(service.Service): - def __init__(self, name, host, topic, threads=None): - self.tg = None - super(Service, self).__init__(threads or 1000) - self.name = name - self.host = host - self.topic = topic - - self.server = None - self.service_id = None - self.cleanup_timer = None - self.cleanup_count = 0 - self.service_report_timer = None - - # Start the service cleanup process. This is only going to be - # running on the main process. 
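A usage sketch for the Spec machinery defined above, assuming the classes were still importable as senlin.common.schema:

from senlin.common import schema

spec_schema = {
    'flavor': schema.String(required=True),
    'count': schema.Integer(default=1),
}

spec = schema.Spec(spec_schema, {'flavor': 'm1.small'})
spec.validate()        # raises ESchema on unknown or missing items
print(spec['flavor'])  # 'm1.small', resolved lazily on access
print(spec['count'])   # 1, filled in from the schema default
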
- if self.tg: - self.cleanup_timer = self.tg.add_timer( - CONF.periodic_interval, self.service_manage_cleanup - ) - - def start(self): - super(Service, self).start() - self.service_id = uuidutils.generate_uuid() - LOG.info( - 'Starting %(name)s service (version: %(version)s ' - 'id: %(service_id)s)', - { - 'name': self.name, - 'version': version.version_info.version_string(), - 'service_id': self.service_id, - } - ) - ctx = senlin_context.get_admin_context() - service_obj.Service.create( - ctx, self.service_id, self.host, self.name, self.topic - ) - self.service_report_timer = self.tg.add_timer( - CONF.periodic_interval, self.service_manage_report - ) - - def stop(self, graceful=True): - LOG.info( - 'Stopping %(name)s service (id: %(service_id)s)', - { - 'name': self.name, - 'service_id': self.service_id or 'main', - } - ) - if self.service_report_timer: - self.service_report_timer.stop() - self.service_report_timer = None - if self.cleanup_timer: - self.cleanup_timer.stop() - self.cleanup_timer = None - if self.service_id: - service_obj.Service.delete(self.service_id) - super(Service, self).stop(graceful) - - def service_manage_cleanup(self): - self.cleanup_count += 1 - try: - service_obj.Service.cleanup_all_expired(self.name) - except Exception as ex: - LOG.error( - 'Error while cleaning up service %(name)s: %(ex)s', - { - 'name': self.name, - 'ex': ex, - } - ) - - # The clean-up process runs during service startup and will over - # multiple attempts check to see if any services have reach the - # deadline and if so remove them. This is only done on startup, or - # after a service recovers from a crash. - if self.cleanup_count >= 5: - self.cleanup_timer.stop() - self.cleanup_timer = None - LOG.info('Finished cleaning up dead services.') - else: - LOG.info('Service clean-up attempt count: %s', self.cleanup_count) - - def service_manage_report(self): - try: - ctx = senlin_context.get_admin_context() - service_obj.Service.update(ctx, self.service_id) - except Exception as ex: - LOG.error( - 'Error while updating service %(name)s: %(ex)s', - { - 'name': self.name, - 'ex': ex, - } - ) diff --git a/senlin/common/utils.py b/senlin/common/utils.py deleted file mode 100644 index b3f7b5d43..000000000 --- a/senlin/common/utils.py +++ /dev/null @@ -1,242 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Common utilities module. 
-""" - -import random -import re -import string - -from jsonpath_rw import parse -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import strutils -from oslo_utils import timeutils -import requests -import urllib - -from senlin.common import consts -from senlin.common import exception -from senlin.common.i18n import _ -from senlin.objects import service as service_obj - -cfg.CONF.import_opt('max_response_size', 'senlin.conf') -cfg.CONF.import_opt('periodic_interval', 'senlin.conf') - -LOG = logging.getLogger(__name__) -_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' - - -class URLFetchError(exception.Error, IOError): - pass - - -def get_positive_int(v): - """Util function converting/checking a value of positive integer. - - :param v: A value to be checked. - :returns: (b, v) where v is (converted) value if bool is True. - b is False if the value fails validation. - """ - if strutils.is_int_like(v): - count = int(v) - if count > 0: - return True, count - return False, 0 - - -def parse_level_values(values): - """Parse a given list of level values to numbers. - - :param values: A list of event level values. - :return: A list of translated values. - """ - if not isinstance(values, list): - values = [values] - result = [] - for v in values: - if v in consts.EVENT_LEVELS: - result.append(consts.EVENT_LEVELS[v]) - elif isinstance(v, int): - result.append(v) - - if result == []: - return None - return result - - -def level_from_number(value): - """Parse a given level value(from number to string). - - :param value: event level number. - :return: A translated value. - """ - n = int(value) - levels = {value: key for key, value in consts.EVENT_LEVELS.items()} - return levels.get(n, None) - - -def url_fetch(url, timeout=1, allowed_schemes=('http', 'https'), verify=True): - """Get the data at the specified URL. - - The URL must use the http: or https: schemes. - The file: scheme is also supported if you override - the allowed_schemes argument. - Raise an IOError if getting the data fails. - """ - - components = urllib.parse.urlparse(url) - - if components.scheme not in allowed_schemes: - raise URLFetchError(_('Invalid URL scheme %s') % components.scheme) - - if components.scheme == 'file': - try: - return urllib.request.urlopen(url, timeout=timeout).read() - except urllib.error.URLError as uex: - raise URLFetchError(_('Failed to retrieve data: %s') % uex) - - try: - resp = requests.get(url, stream=True, verify=verify, timeout=timeout) - resp.raise_for_status() - - # We cannot use resp.text here because it would download the entire - # file, and a large enough file would bring down the engine. The - # 'Content-Length' header could be faked, so it's necessary to - # download the content in chunks to until max_response_size is reached. - # The chunk_size we use needs to balance CPU-intensive string - # concatenation with accuracy (eg. it's possible to fetch 1000 bytes - # greater than max_response_size with a chunk_size of 1000). 
- reader = resp.iter_content(chunk_size=1000) - result = "" - for chunk in reader: - if isinstance(chunk, bytes): - chunk = chunk.decode('utf-8') - result += chunk - if len(result) > cfg.CONF.max_response_size: - raise URLFetchError("Data exceeds maximum allowed size (%s" - " bytes)" % cfg.CONF.max_response_size) - return result - - except requests.exceptions.RequestException as ex: - raise URLFetchError(_('Failed to retrieve data: %s') % ex) - - -def random_name(length=8): - if length <= 0: - return '' - - lead = random.choice(string.ascii_letters) - tail = ''.join(random.choice(string.ascii_letters + string.digits) - for _ in range(length - 1)) - return lead + tail - - -def format_node_name(fmt, cluster, index): - """Generates a node name using the given format. - - :param fmt: A string containing format directives. Currently we only - support the following keys: - - "$nR": a random string with at most 'n' characters where - 'n' defaults to 8. - - "$nI": a string representation of the node index where 'n' - instructs the number of digits generated with 0s - padded to the left. - :param cluster: The DB object for the cluster to which the node belongs. - This parameter is provided for future extension. - :param index: The index for the node in the target cluster. - :returns: A string containing the generated node name. - """ - # for backward compatibility - if not fmt: - fmt = "node-$8R" - - result = "" - last = 0 - pattern = re.compile("(\$\d{0,8}[rRI])") - for m in pattern.finditer(fmt): - group = m.group() - t = group[-1] - width = group[1:-1] - if t == "R" or t == "r": # random string - if width != "": - sub = random_name(int(width)) - else: - sub = random_name(8) - if t == "r": - sub = sub.lower() - elif t == "I": # node index - if width != "": - str_index = str(index) - sub = str_index.zfill(int(width)) - else: - sub = str(index) - result += fmt[last:m.start()] + sub - last = m.end() - result += fmt[last:] - - return result - - -def isotime(at): - """Stringify time in ISO 8601 format. - - oslo.versionedobject is using this function for datetime formatting. - """ - if at is None: - return None - - st = at.strftime(_ISO8601_TIME_FORMAT) - tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' - st += ('Z' if tz == 'UTC' or tz == "UTC+00:00" else tz) - return st - - -def get_path_parser(path): - """Get a JsonPath parser based on a path string. - - :param path: A string containing a JsonPath. - :returns: A parser used for path matching. - :raises: An exception of `BadRequest` if the path fails validation. - """ - try: - expr = parse(path) - except Exception as ex: - error_text = str(ex) - error_msg = error_text.split(':', 1)[1] - raise exception.BadRequest( - msg=_("Invalid attribute path - %s") % error_msg.strip()) - - return expr - - -def is_service_dead(ctx, service_id, duration=None): - """Check if a service is dead. - - If the service hasn't reported its status for the given duration, it is - treated as a dead service. - - :param ctx: A request context. - :param service_id: The ID of the service to test. - :param duration: The time duration in seconds. 
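Examples of the format directives `format_node_name()` handles above, assuming the helper were importable from senlin.common.utils (the cluster argument is currently unused, so None suffices):

from senlin.common.utils import format_node_name

format_node_name('node-$3I', None, 7)  # -> 'node-007'
format_node_name('web-$4r', None, 0)   # -> 'web-' + 4 random lowercase chars
format_node_name(None, None, 2)        # -> 'node-' + 8 random chars (default fmt)
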
- """ - if not duration: - duration = 2.2 * cfg.CONF.periodic_interval - - service = service_obj.Service.get(ctx, service_id) - if not service: - return True - if timeutils.is_older_than(service.updated_at, duration): - return True - return False diff --git a/senlin/conductor/__init__.py b/senlin/conductor/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/conductor/service.py b/senlin/conductor/service.py deleted file mode 100644 index e112e2535..000000000 --- a/senlin/conductor/service.py +++ /dev/null @@ -1,2605 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import copy -import functools - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging -from oslo_utils import timeutils -from osprofiler import profiler - -from senlin.common import consts -from senlin.common import context as senlin_context -from senlin.common import exception -from senlin.common.i18n import _ -from senlin.common import messaging as rpc_messaging -from senlin.common import scaleutils as su -from senlin.common import schema -from senlin.common import service -from senlin.common import utils -from senlin.engine.actions import base as action_mod -from senlin.engine.actions import cluster_action as cluster_action_mod -from senlin.engine import cluster as cluster_mod -from senlin.engine import dispatcher -from senlin.engine import environment -from senlin.engine import node as node_mod -from senlin.engine.receivers import base as receiver_mod -from senlin.objects import action as action_obj -from senlin.objects import base as obj_base -from senlin.objects import cluster as co -from senlin.objects import cluster_policy as cp_obj -from senlin.objects import credential as cred_obj -from senlin.objects import event as event_obj -from senlin.objects import node as node_obj -from senlin.objects import policy as policy_obj -from senlin.objects import profile as profile_obj -from senlin.objects import receiver as receiver_obj -from senlin.policies import base as policy_base -from senlin.profiles import base as profile_base - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -def request_context(func): - @functools.wraps(func) - def wrapped(self, ctx, req): - if ctx and not isinstance(ctx, senlin_context.RequestContext): - ctx = senlin_context.RequestContext.from_dict(ctx.to_dict()) - - obj = obj_base.SenlinObject.obj_class_from_name( - req['senlin_object.name'], - req['senlin_object.version']) - req_obj = obj.obj_from_primitive(req) - try: - return func(self, ctx, req_obj) - except exception.SenlinException: - raise oslo_messaging.rpc.dispatcher.ExpectedException() - return wrapped - - -@profiler.trace_cls("rpc") -class ConductorService(service.Service): - """Lifecycle manager for a running service engine. - - - All the contained methods here are called from the RPC client. - - If a RPC call does not have a corresponding method here, an exception - will be thrown. 
- - Arguments to these calls are added dynamically and will be treated as - keyword arguments by the RPC client. - """ - def __init__(self, host, topic): - super(ConductorService, self).__init__( - self.service_name, host, topic, - threads=CONF.conductor.threads - ) - self.dispatcher_topic = consts.ENGINE_TOPIC - self.health_mgr_topic = consts.HEALTH_MANAGER_TOPIC - - # The following are initialized here and will be assigned in start() - # which happens after the fork when spawning multiple worker processes - self.target = None - - # Initialize the global environment - environment.initialize() - - @property - def service_name(self): - return 'senlin-conductor' - - def start(self): - super(ConductorService, self).start() - - self.target = oslo_messaging.Target(version=consts.RPC_API_VERSION, - server=self.host, - topic=self.topic) - serializer = obj_base.VersionedObjectSerializer() - self.server = rpc_messaging.get_rpc_server(self.target, self, - serializer=serializer) - self.server.start() - - def stop(self, graceful=False): - if self.server: - self.server.stop() - self.server.wait() - super(ConductorService, self).stop(graceful) - - @request_context - def credential_create(self, ctx, req): - """Create the credential based on the context. - - We may add more parameters in future to the query parameter, for - example as Senlin expands its support to non-OpenStack backends. - - :param ctx: An instance of the request context. - :param req: An instance of the CredentialCreateRequest. - :return: A dictionary containing the persistent credential. - """ - values = { - 'user': ctx.user_id, - 'project': ctx.project_id, - 'cred': req.cred - } - cred_obj.Credential.update_or_create(ctx, values) - return {'cred': req.cred} - - @request_context - def credential_get(self, ctx, req): - """Get the credential based on the context. - - We may add more parameters in future to the req.query, for - example as Senlin expands its support to non-OpenStack backends. - - :param ctx: An instance of the request context. - :param req: An instance of the CredentialGetRequest. - :return: A dictionary containing the persistent credential, or None - if no matching credential is found. - """ - res = cred_obj.Credential.get(ctx, req.user, req.project) - if res is None: - return None - return res.cred.get('openstack', None) - - @request_context - def credential_update(self, ctx, req): - """Update a credential based on the context and provided value. - - We may add more parameters in future to the query parameter, for - example as Senlin expands its support to non-OpenStack backends. - - :param ctx: An instance of the request context. - :param req: An instance of the CredentialUpdateRequest. - :return: A dictionary containing the persistent credential. - """ - cred_obj.Credential.update(ctx, ctx.user_id, ctx.project_id, - {'cred': req.cred}) - return {'cred': req.cred} - - @request_context - def get_revision(self, ctx, req): - return CONF.revision['senlin_engine_revision'] - - @request_context - def profile_type_list(self, ctx, req): - """List known profile type implementations. - - :param ctx: An instance of the request context. - :param req: An instance of the ProfileTypeListRequest. - :return: A list of profile types. - """ - return environment.global_env().get_profile_types() - - @request_context - def profile_type_get(self, ctx, req): - """Get the details about a profile type. - - :param ctx: An instance of the request context. - :param req: An instance of ProfileTypeGetRequest. 
- :return: The details about a profile type. - """ - profile = environment.global_env().get_profile(req.type_name) - data = profile.get_schema() - - return { - 'name': req.type_name, - 'schema': data, - 'support_status': profile.VERSIONS - } - - @request_context - def profile_type_ops(self, ctx, req): - """List the operations supported by a profile type. - - :param ctx: An instance of the request context. - :param req: An instance of ProfileTypeOpListRequest. - :return: A dictionary containing the operations supported by the - profile type. - """ - try: - pt = environment.global_env().get_profile(req.type_name) - except exception.ResourceNotFound as ex: - raise exception.BadRequest(msg=str(ex)) - - return {'operations': pt.get_ops()} - - @request_context - def profile_list(self, ctx, req): - """List profiles matching the specified criteria. - - :param ctx: An instance of the request context. - :param req: An instance of the ProfileListRequest object. - :return: A list of `Profile` object representations. - """ - req.obj_set_defaults() - if not req.project_safe and not ctx.is_admin: - raise exception.Forbidden() - - query = {'project_safe': req.project_safe} - if req.obj_attr_is_set('limit'): - query['limit'] = req.limit - if req.obj_attr_is_set('marker'): - query['marker'] = req.marker - if req.obj_attr_is_set('sort') and req.sort is not None: - query['sort'] = req.sort - filters = {} - if req.obj_attr_is_set('name'): - filters['name'] = req.name - if req.obj_attr_is_set('type'): - filters['type'] = req.type - if filters: - query['filters'] = filters - - profiles = profile_obj.Profile.get_all(ctx, **query) - return [p.to_dict() for p in profiles] - - def _validate_profile(self, ctx, spec, name=None, - metadata=None, validate_props=False): - """Validate a profile. - - :param ctx: An instance of the request context. - :param name: The name of the profile to be validated. - :param spec: A dictionary containing the spec for the profile. - :param metadata: A dictionary containing optional key-value pairs to - be associated with the profile. - :param validate_props: Whether to validate if provide a valid Value - to property. - :return: Validated profile object. - """ - type_name, version = schema.get_spec_version(spec) - type_str = "-".join([type_name, version]) - plugin = environment.global_env().get_profile(type_str) - - kwargs = { - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - 'metadata': metadata - } - if name is None: - name = 'validated_profile' - profile = plugin(name, spec, **kwargs) - try: - profile.validate(validate_props=validate_props) - except exception.ESchema as ex: - msg = str(ex) - LOG.error("Failed in validating profile: %s", msg) - raise exception.InvalidSpec(message=msg) - - return profile - - @request_context - def profile_create(self, ctx, req): - """Create a profile with the given properties. - - :param ctx: An instance of the request context. - :param req: An instance of the ProfileCreateRequest object. - :return: A dictionary containing the details of the profile object - created. - """ - name = req.profile.name - if CONF.name_unique: - if profile_obj.Profile.get_by_name(ctx, name): - msg = _("A profile named '%(name)s' already exists." - ) % {"name": name} - raise exception.BadRequest(msg=msg) - - metadata = {} - if req.profile.obj_attr_is_set('metadata'): - metadata = req.profile.metadata - - LOG.info("Creating profile '%s'.", name) - - # NOTE: we get the Profile subclass directly to ensure we are calling - # the correct methods. 
- type_name, version = schema.get_spec_version(req.profile.spec) - type_str = "-".join([type_name, version]) - cls = environment.global_env().get_profile(type_str) - profile = cls.create(ctx, name, req.profile.spec, metadata=metadata) - - LOG.info("Profile %(name)s is created: %(id)s.", - {'name': name, 'id': profile.id}) - - return profile.to_dict() - - @request_context - def profile_validate(self, ctx, req): - """Validate a profile with the given properties. - - :param ctx: An instance of the request context. - :param req: An instance of the ProfileValidateRequest. - :return: A dictionary containing the details of the profile object - validated. - """ - profile = self._validate_profile(ctx, req.profile.spec, - validate_props=True) - - return profile.to_dict() - - @request_context - def profile_get(self, ctx, req): - """Retrieve the details about a profile. - - :param ctx: An instance of the request context. - :param req: An instance of the ProfileGetRequest. - :return: A dictionary containing the profile details, or an exception - of type `ResourceNotFound` if no matching object is found. - """ - kwargs = {"project_safe": not ctx.is_admin} - profile = profile_obj.Profile.find(ctx, req.identity, **kwargs) - return profile.to_dict() - - @request_context - def profile_update(self, ctx, req): - """Update the properties of a given profile. - - :param ctx: An instance of the request context. - :param req: An instance of the ProfileUpdateRequest object. - :returns: A dictionary containing the details of the updated profile, - or an exception `ResourceNotFound` if no matching profile is - found. - """ - LOG.info("Updating profile '%(id)s.'", {'id': req.identity}) - db_profile = profile_obj.Profile.find(ctx, req.identity) - profile = profile_base.Profile.load(ctx, profile=db_profile) - changed = False - if (req.profile.obj_attr_is_set('name') and - req.profile.name is not None): - if req.profile.name != profile.name: - profile.name = req.profile.name - changed = True - if req.profile.obj_attr_is_set('metadata'): - if req.profile.metadata != profile.metadata: - profile.metadata = req.profile.metadata - changed = True - if changed: - profile.store(ctx) - else: - msg = _("No property needs an update.") - raise exception.BadRequest(msg=msg) - - LOG.info("Profile '%(id)s' is updated.", {'id': req.identity}) - return profile.to_dict() - - @request_context - def profile_delete(self, ctx, req): - """Delete the specified profile. - - :param ctx: An instance of the request context. - :param req: An instance of the ProfileDeleteRequest. - :return: None if succeeded or an exception of `ResourceInUse` if - profile is referenced by certain clusters/nodes. - """ - db_profile = profile_obj.Profile.find(ctx, req.identity) - LOG.info("Deleting profile '%s'.", req.identity) - - cls = environment.global_env().get_profile(db_profile.type) - try: - cls.delete(ctx, db_profile.id) - except exception.EResourceBusy: - reason = _("still referenced by some clusters and/or nodes.") - raise exception.ResourceInUse(type='profile', id=db_profile.id, - reason=reason) - LOG.info("Profile '%s' is deleted.", req.identity) - - @request_context - def policy_type_list(self, ctx, req): - """List known policy type implementations. - - :param ctx: An instance of the request context. - :param req: An instance of the PolicyTypeListRequest. - :return: A list of policy types. - """ - return environment.global_env().get_policy_types() - - @request_context - def policy_type_get(self, ctx, req): - """Get the details about a policy type. 
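How a spec map resolves to a plugin lookup key in the handlers above; 'os.nova.server' is a real Senlin profile type, used here only as an example:

spec = {
    'type': 'os.nova.server',
    'version': '1.0',
    'properties': {'flavor': 'm1.small'},
}
type_name, version = spec['type'], str(spec['version'])
print('-'.join([type_name, version]))  # 'os.nova.server-1.0', the registry key
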
- - :param ctx: An instance of the request context. - :param req: An instance of PolicyTypeGetRequest. - :return: The details about a policy type. - """ - policy_type = environment.global_env().get_policy(req.type_name) - data = policy_type.get_schema() - - return { - 'name': req.type_name, - 'schema': data, - 'support_status': policy_type.VERSIONS - } - - @request_context - def policy_list(self, ctx, req): - """List policies matching the specified criteria - - :param ctx: An instance of request context. - :param req: An instance of the PolicyListRequest. - :return: A List of `Policy` object representations. - """ - req.obj_set_defaults() - if not req.project_safe and not ctx.is_admin: - raise exception.Forbidden() - - query = {'project_safe': req.project_safe} - if req.obj_attr_is_set('limit'): - query['limit'] = req.limit - if req.obj_attr_is_set('marker'): - query['marker'] = req.marker - if req.obj_attr_is_set('sort') and req.sort is not None: - query['sort'] = req.sort - filters = {} - if req.obj_attr_is_set('name'): - filters['name'] = req.name - if req.obj_attr_is_set('type'): - filters['type'] = req.type - if filters: - query['filters'] = filters - - return [p.to_dict() for p in policy_obj.Policy.get_all(ctx, **query)] - - def _validate_policy(self, ctx, spec, name=None, validate_props=False): - """Validate a policy. - - :param ctx: An instance of the request context. - :param spec: A dictionary containing the spec for the policy. - :param name: The name of the policy to be validated. - :param validate_props: Whether to validate the value of property. - :return: Validated policy object. - """ - - type_name, version = schema.get_spec_version(spec) - type_str = "-".join([type_name, version]) - - plugin = environment.global_env().get_policy(type_str) - - kwargs = { - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - } - if name is None: - name = 'validated_policy' - policy = plugin(name, spec, **kwargs) - - try: - policy.validate(ctx, validate_props=validate_props) - except exception.InvalidSpec as ex: - msg = str(ex) - LOG.error("Failed in validating policy: %s", msg) - raise exception.InvalidSpec(message=msg) - - return policy - - @request_context - def policy_create(self, ctx, req): - """Create a policy with the given name and spec. - - :param ctx: An instance of the request context. - :param req: An instance of the PolicyCreateRequestBody. - :return: A dictionary containing the details of the policy object - created. - """ - name = req.name - - if CONF.name_unique: - if policy_obj.Policy.get_by_name(ctx, name): - msg = _("A policy named '%(name)s' already exists." - ) % {"name": name} - raise exception.BadRequest(msg=msg) - - policy = self._validate_policy(ctx, req.spec, name=name, - validate_props=True) - - LOG.info("Creating policy %(type)s '%(name)s'", - {'type': policy.type, 'name': policy.name}) - - policy.store(ctx) - LOG.info("Policy '%(name)s' is created: %(id)s.", - {'name': name, 'id': policy.id}) - return policy.to_dict() - - @request_context - def policy_get(self, ctx, req): - """Retrieve the details about a policy. - - :param ctx: An instance of request context. - :param req: An instance of the PolicyGetRequest. - :return: A dictionary containing the policy details. - """ - policy = policy_obj.Policy.find(ctx, req.identity) - return policy.to_dict() - - @request_context - def policy_update(self, ctx, req): - """Update the properties of a given policy - - :param ctx: An instance of request context. 
- :param req: An instance of the PolicyUpdateRequest. - :return: A dictionary containing the policy details. - """ - db_policy = policy_obj.Policy.find(ctx, req.identity) - policy = policy_base.Policy.load(ctx, db_policy=db_policy) - - changed = False - if (req.policy.name is not None and - req.policy.name != policy.name): - LOG.info("Updating policy '%s'.", req.identity) - policy.name = req.policy.name - changed = True - policy.store(ctx) - LOG.info("Policy '%s' is updated.", req.identity) - - if not changed: - msg = _("No property needs an update.") - raise exception.BadRequest(msg=msg) - - return policy.to_dict() - - @request_context - def policy_delete(self, ctx, req): - """Delete the specified policy. - - :param ctx: An instance of the request context. - :param req: An instance of the PolicyDeleteRequest. - :return: None if succeeded or an exception of `ResourceInUse` if - policy is still attached to certain clusters. - """ - db_policy = policy_obj.Policy.find(ctx, req.identity) - LOG.info("Deleting policy '%s'.", req.identity) - try: - policy_base.Policy.delete(ctx, db_policy.id) - except exception.EResourceBusy: - reason = _("still attached to some clusters") - raise exception.ResourceInUse(type='policy', id=req.identity, - reason=reason) - LOG.info("Policy '%s' is deleted.", req.identity) - - @request_context - def policy_validate(self, ctx, req): - """Validate a policy with the given properties. - - :param ctx: An instance of the request context. - :param req: An instance of the PolicyValidateRequestBody. - :return: A dictionary containing the details of the policy object - validated. - """ - - policy = self._validate_policy(ctx, req.spec, validate_props=False) - - return policy.to_dict() - - @request_context - def cluster_list(self, ctx, req): - """List clusters matching the specified criteria. - - :param ctx: An instance of request context. - :param req: An instance of the ClusterListRequest. - :return: A list of `Cluster` object representations. - """ - req.obj_set_defaults() - if not req.project_safe and not ctx.is_admin: - raise exception.Forbidden() - - query = {'project_safe': req.project_safe} - if req.obj_attr_is_set('limit'): - query['limit'] = req.limit - if req.obj_attr_is_set('marker'): - query['marker'] = req.marker - if req.obj_attr_is_set('sort') and req.sort is not None: - query['sort'] = req.sort - filters = {} - if req.obj_attr_is_set('name'): - filters['name'] = req.name - if req.obj_attr_is_set('status'): - filters['status'] = req.status - if filters: - query['filters'] = filters - - return [c.to_dict() for c in co.Cluster.get_all(ctx, **query)] - - @request_context - def cluster_get(self, context, req): - """Retrieve the cluster specified. - - :param context: An instance of the request context. - :param req: An instance of the ClusterGetRequest. - :return: A dictionary containing the details about a cluster. - """ - kwargs = {"project_safe": not context.is_admin} - - cluster = co.Cluster.find(context, req.identity, **kwargs) - return cluster.to_dict() - - def check_cluster_quota(self, context): - """Validate the number of clusters created in a project. - - :param context: An instance of the request context. - :return: None if cluster creation is okay, or an exception of type - `Forbidden` if number of clusters reaches the maximum. - """ - existing = co.Cluster.count_all(context) - maximum = CONF.max_clusters_per_project - if existing >= maximum: - raise exception.OverQuota() - - @request_context - def cluster_create(self, ctx, req): - """Create a cluster. 
- - :param ctx: An instance of the request context. - :param req: An instance of the ClusterCreateRequestBody object. - :return: A dictionary containing the details about the cluster and the - ID of the action triggered by this operation. - """ - self.check_cluster_quota(ctx) - if CONF.name_unique: - if co.Cluster.get_by_name(ctx, req.name): - msg = _("a cluster named '%s' already exists.") % req.name - raise exception.BadRequest(msg=msg) - - try: - db_profile = profile_obj.Profile.find(ctx, req.profile_id) - except exception.ResourceNotFound as ex: - msg = ex.enhance_msg('specified', ex) - raise exception.BadRequest(msg=msg) - - if req.obj_attr_is_set('desired_capacity'): - desired = req.desired_capacity - elif req.obj_attr_is_set('min_size'): - desired = req.min_size - else: - desired = 0 - min_size = req.min_size if req.obj_attr_is_set('min_size') else None - max_size = req.max_size if req.obj_attr_is_set('max_size') else None - res = su.check_size_params(None, desired, min_size, max_size, True) - if res: - raise exception.BadRequest(msg=res) - - # set defaults to the request object - req.obj_set_defaults() - - LOG.info("Creating cluster '%s'.", req.name) - - values = { - 'name': req.name, - 'profile_id': db_profile.id, - 'desired_capacity': desired, - 'min_size': req.min_size or consts.CLUSTER_DEFAULT_MIN_SIZE, - 'max_size': req.max_size or consts.CLUSTER_DEFAULT_MAX_SIZE, - 'next_index': 1, - 'timeout': req.timeout or cfg.CONF.default_action_timeout, - 'status': consts.CS_INIT, - 'status_reason': 'Initializing', - 'data': {}, - 'metadata': req.metadata or {}, - 'dependents': {}, - 'config': req.config or {}, - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - } - cluster = co.Cluster.create(ctx, values) - - # Build an Action for cluster creation - kwargs = { - 'name': 'cluster_create_%s' % cluster.id[:8], - 'cluster_id': cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - } - action_id = action_mod.Action.create(ctx, cluster.id, - consts.CLUSTER_CREATE, **kwargs) - dispatcher.start_action() - LOG.info("Cluster create action queued: %s.", action_id) - - result = cluster.to_dict() - result['action'] = action_id - return result - - @request_context - def cluster_update(self, ctx, req): - """Update a cluster. - - :param ctx: An instance of the request context. - :param req: An instance of the ClusterUpdateRequest object. - :return: A dictionary containing the details about the cluster and the - ID of the action triggered by this operation. 
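The capacity defaulting rule in cluster_create() is: use desired_capacity when given, else fall back to min_size, else 0, then verify the triple is consistent. A standalone sketch of that rule, with a simple bounds check standing in for su.check_size_params() (a negative max_size meaning "unlimited" follows the API convention):

    def resolve_desired(desired_capacity=None, min_size=None, max_size=None):
        if desired_capacity is not None:
            desired = desired_capacity
        elif min_size is not None:
            desired = min_size
        else:
            desired = 0
        if min_size is not None and desired < min_size:
            raise ValueError("desired capacity is below min_size")
        if max_size is not None and 0 <= max_size < desired:
            raise ValueError("desired capacity is above max_size")
        return desired

    assert resolve_desired(min_size=2, max_size=5) == 2
    assert resolve_desired(desired_capacity=3) == 3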
- """ - cluster = co.Cluster.find(ctx, req.identity) - if cluster.status == consts.CS_ERROR: - msg = _('Updating a cluster in error state') - LOG.error(msg) - raise exception.FeatureNotSupported(feature=msg) - - LOG.info("Updating cluster '%s'.", req.identity) - - inputs = {} - if (req.obj_attr_is_set(consts.CLUSTER_PROFILE) and - req.profile_id is not None): - old_profile = profile_obj.Profile.find(ctx, cluster.profile_id) - try: - new_profile = profile_obj.Profile.find(ctx, req.profile_id) - except exception.ResourceNotFound as ex: - msg = ex.enhance_msg('specified', ex) - raise exception.BadRequest(msg=msg) - - if new_profile.type != old_profile.type: - msg = _('Cannot update a cluster to a different profile type, ' - 'operation aborted.') - raise exception.BadRequest(msg=msg) - if old_profile.id != new_profile.id: - inputs['new_profile_id'] = new_profile.id - - if (req.obj_attr_is_set(consts.CLUSTER_METADATA) and - req.metadata != cluster.metadata): - inputs['metadata'] = copy.deepcopy(req.metadata) - - if (req.obj_attr_is_set(consts.CLUSTER_TIMEOUT) and - req.timeout != cluster.timeout): - inputs['timeout'] = req.timeout - - if (req.obj_attr_is_set(consts.CLUSTER_NAME) and - req.name != cluster.name): - inputs['name'] = req.name - - if (req.obj_attr_is_set(consts.CLUSTER_CONFIG) and - req.config != cluster.config): - # TODO(anyone): updating cluster config is a multiplexed operation - # which have to be handled carefully. - inputs['config'] = req.config - - if req.obj_attr_is_set(consts.CLUSTER_PROFILE_ONLY): - inputs['profile_only'] = req.profile_only - - if not inputs: - msg = _("No property needs an update.") - raise exception.BadRequest(msg=msg) - - kwargs = { - 'name': 'cluster_update_%s' % cluster.id[:8], - 'cluster_id': cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': inputs, - } - action_id = action_mod.Action.create(ctx, cluster.id, - consts.CLUSTER_UPDATE, **kwargs) - dispatcher.start_action() - LOG.info("Cluster update action queued: %s.", action_id) - - resp = cluster.to_dict() - resp['action'] = action_id - return resp - - @request_context - def cluster_delete(self, ctx, req): - """Delete the specified cluster. - - :param ctx: An instance of the request context. - :param req: An instance of the ClusterDeleteRequest object. - :return: A dictionary containing the ID of the action triggered. - """ - LOG.info('Deleting cluster %s', req.identity) - - # 'cluster' below is a DB object. 
- cluster = co.Cluster.find(ctx, req.identity) - - force = False - if req.obj_attr_is_set(consts.CLUSTER_DELETE_FORCE): - force = req.force - - if (not force and - cluster.status in [consts.CS_CREATING, - consts.CS_UPDATING, - consts.CS_DELETING, - consts.CS_RECOVERING]): - raise exception.ActionInProgress(type='cluster', id=req.identity, - status=cluster.status) - - # collect all errors - msg = [] - con_profiles = cluster.dependents.get('profiles', None) - if con_profiles is not None: - err = _("still referenced by profile(s): %s") % con_profiles - LOG.error(err) - msg.append(err) - - if msg: - raise exception.ResourceInUse(type='cluster', id=req.identity, - reason='\n'.join(msg)) - - params = { - 'name': 'cluster_delete_%s' % cluster.id[:8], - 'cluster_id': cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - } - action_id = action_mod.Action.create(ctx, cluster.id, - consts.CLUSTER_DELETE, - force=True, **params) - dispatcher.start_action() - LOG.info("Cluster delete action queued: %s", action_id) - - return {'action': action_id} - - @request_context - def cluster_add_nodes(self, context, req): - """Add specified nodes to the specified cluster. - - :param context: An instance of the request context. - :param req: An instance of the ClusterAddNodesRequest object. - :return: A dictionary containing the ID of the action triggered. - """ - LOG.info("Adding nodes '%(nodes)s' to cluster '%(cluster)s'.", - {'cluster': req.identity, 'nodes': req.nodes}) - - db_cluster = co.Cluster.find(context, req.identity) - db_cluster_profile = profile_obj.Profile.get( - context, db_cluster.profile_id, project_safe=True) - cluster_profile_type = db_cluster_profile.type - - found = [] - not_found = [] - bad_nodes = [] - owned_nodes = [] - not_match_nodes = [] - for node in req.nodes: - try: - db_node = node_obj.Node.find(context, node) - # Check node status whether in ACTIVE - if db_node.status != consts.NS_ACTIVE: - bad_nodes.append(db_node.id) - # Check the node whether owned by any cluster - if db_node.cluster_id: - owned_nodes.append(db_node.id) - # check profile type matching - db_node_profile = profile_obj.Profile.get( - context, db_node.profile_id, project_safe=True) - node_profile_type = db_node_profile.type - if node_profile_type != cluster_profile_type: - not_match_nodes.append(db_node.id) - - found.append(db_node.id) - except (exception.ResourceNotFound, exception.MultipleChoices): - not_found.append(node) - pass - - msg = [] - if len(not_match_nodes): - msg.append(_("Profile type of nodes %s does not match that of the " - "cluster.") % not_match_nodes) - if len(owned_nodes): - msg.append(("Nodes %s already owned by some " - "cluster.") % owned_nodes) - if len(bad_nodes): - msg.append(_("Nodes are not ACTIVE: %s.") % bad_nodes) - if len(not_found): - msg.append(_("Nodes not found: %s.") % not_found) - - if msg: - msg_err = '\n'.join(msg) - LOG.error(msg_err) - raise exception.BadRequest(msg=msg_err) - - target_size = db_cluster.desired_capacity + len(found) - error = su.check_size_params(db_cluster, target_size, strict=True) - if error: - LOG.error(error) - raise exception.BadRequest(msg=error) - - params = { - 'name': 'cluster_add_nodes_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': {'nodes': found}, - } - action_id = action_mod.Action.create(context, db_cluster.id, - consts.CLUSTER_ADD_NODES, - **params) - dispatcher.start_action() - LOG.info("Cluster add nodes action queued: %s.", 
-                 action_id)
-
-        return {'action': action_id}
-
-    @request_context
-    def cluster_del_nodes(self, ctx, req):
-        """Delete specified nodes from the named cluster.
-
-        :param ctx: An instance of the request context.
-        :param req: An instance of the ClusterDelNodesRequest object.
-        :return: A dictionary containing the ID of the action triggered.
-        """
-        LOG.info("Deleting nodes '%(nodes)s' from cluster '%(cluster)s'.",
-                 {'cluster': req.identity, 'nodes': req.nodes})
-        db_cluster = co.Cluster.find(ctx, req.identity)
-        found = []
-        not_found = []
-        bad_nodes = []
-        depended_nodes = []
-        for node in req.nodes:
-            try:
-                db_node = node_obj.Node.find(ctx, node)
-                dep_nodes = db_node.dependents.get('nodes', None)
-                if db_node.cluster_id != db_cluster.id:
-                    bad_nodes.append(db_node.id)
-                elif dep_nodes is not None:
-                    depended_nodes.append(db_node.id)
-                else:
-                    found.append(db_node.id)
-            except (exception.ResourceNotFound, exception.MultipleChoices):
-                not_found.append(node)
-
-        msg = []
-        if len(depended_nodes):
-            reason = _("nodes %s are depended on by other nodes, so they "
-                       "cannot be deleted or they would become orphan "
-                       "nodes") % depended_nodes
-            LOG.error(reason)
-            raise exception.ResourceInUse(type='node', id=depended_nodes,
-                                          reason=reason)
-        if len(not_found):
-            msg.append(_("Nodes not found: %s.") % not_found)
-        if len(bad_nodes):
-            msg.append(_("Nodes not members of specified cluster: "
-                         "%s.") % bad_nodes)
-
-        if msg:
-            msg_err = '\n'.join(msg)
-            LOG.error(msg_err)
-            raise exception.BadRequest(msg=msg_err)
-
-        target_size = db_cluster.desired_capacity - len(found)
-        error = su.check_size_params(db_cluster, target_size, strict=True)
-        if error:
-            LOG.error(error)
-            raise exception.BadRequest(msg=error)
-
-        params = {
-            'name': 'cluster_del_nodes_%s' % db_cluster.id[:8],
-            'cluster_id': db_cluster.id,
-            'cause': consts.CAUSE_RPC,
-            'status': action_mod.Action.READY,
-            'inputs': {
-                'candidates': found,
-                'count': len(found),
-            },
-        }
-        if 'destroy_after_deletion' in req:  # version 1.1
-            params['inputs'].update(
-                {'destroy_after_deletion': req.destroy_after_deletion})
-        action_id = action_mod.Action.create(ctx, db_cluster.id,
-                                             consts.CLUSTER_DEL_NODES,
-                                             **params)
-        dispatcher.start_action()
-        LOG.info("Cluster delete nodes action queued: %s.", action_id)
-
-        return {'action': action_id}
-
-    def _validate_replace_nodes(self, ctx, cluster, nodes):
-        """Validate the nodes specified in a replacement operation.
-
-        :param ctx: The request context.
-        :param cluster: The cluster object from the DB layer.
-        :param nodes: A dictionary wherein each key is the identity of a node
-                      to be replaced and the corresponding value is the
-                      identity of a node as replacement.
-        :returns: A dict containing the validated map of node substitutions.
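cluster_add_nodes(), cluster_del_nodes() and _validate_replace_nodes() all follow one validation pattern: bucket each candidate into per-error lists, then raise a single combined BadRequest or proceed with the good ones. A condensed standalone sketch of the pattern:

    def partition_nodes(nodes, lookup):
        # lookup(identity) returns a record dict or raises KeyError, a
        # stand-in for node_obj.Node.find() raising ResourceNotFound.
        found, not_found, bad = [], [], []
        for node in nodes:
            try:
                rec = lookup(node)
            except KeyError:
                not_found.append(node)
                continue
            (found if rec.get('status') == 'ACTIVE' else bad).append(rec['id'])
        errors = []
        if bad:
            errors.append("Nodes are not ACTIVE: %s." % bad)
        if not_found:
            errors.append("Nodes not found: %s." % not_found)
        if errors:
            raise ValueError('\n'.join(errors))  # BadRequest in the service
        return found

    records = {'n1': {'id': 'n1', 'status': 'ACTIVE'}}
    assert partition_nodes(['n1'], lambda n: records[n]) == ['n1']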
- """ - profile = profile_obj.Profile.get(ctx, cluster.profile_id, - project_safe=True) - - cluster_profile_type = profile.type - - found = {} - not_member = [] - owned_nodes = [] - not_found_new = [] - not_found_old = [] - bad_nodes = [] - not_match_nodes = [] - for (old_node, new_node) in nodes.items(): - try: - db_old_node = node_obj.Node.find(ctx, old_node) - except (exception.ResourceNotFound, exception.MultipleChoices): - not_found_old.append(old_node) - continue - - try: - db_new_node = node_obj.Node.find(ctx, new_node) - except (exception.ResourceNotFound, exception.MultipleChoices): - not_found_new.append(new_node) - continue - - if db_old_node.cluster_id != cluster.id: - not_member.append(old_node) - if db_new_node.cluster_id: - owned_nodes.append(new_node) - if db_new_node.status != consts.NS_ACTIVE: - bad_nodes.append(new_node) - - # check the profile type - node_profile = profile_obj.Profile.get(ctx, db_new_node.profile_id, - project_safe=True) - - if cluster_profile_type != node_profile.type: - not_match_nodes.append(new_node) - - found[db_old_node.id] = db_new_node.id - - msg = [] - if len(not_member) > 0: - msg.append(_("The specified nodes %(n)s to be replaced are not " - "members of the cluster %(c)s.") % {'n': not_member, - 'c': cluster.id}) - if len(owned_nodes) > 0: - msg.append(_("Nodes %s already member of a " - "cluster.") % owned_nodes) - if len(bad_nodes) > 0: - msg.append(_("Nodes are not ACTIVE: %s.") % bad_nodes) - if len(not_match_nodes) > 0: - msg.append(_("Profile type of nodes %s do not match that of the " - "cluster.") % not_match_nodes) - if len(not_found_old) > 0: - msg.append(_("Original nodes not found: %s.") % not_found_old) - if len(not_found_new) > 0: - msg.append(_("Replacement nodes not found: %s.") % not_found_new) - - if msg: - msg_err = '\n'.join(msg) - LOG.error(msg_err) - raise exception.BadRequest(msg=msg_err) - - return found - - @request_context - def cluster_replace_nodes(self, ctx, req): - """Replace the nodes in cluster with specified nodes - - :param ctx: An instance of the request context. - :param req: An object of ClusterReplaceNodesRequest. - :return: A dictionary containing the ID of the action triggered. - """ - LOG.info("Replace nodes of the cluster '%s'.", req.identity) - db_cluster = co.Cluster.find(ctx, req.identity) - - nodes = self._validate_replace_nodes(ctx, db_cluster, req.nodes) - kwargs = { - 'name': 'cluster_replace_nodes_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': {'candidates': nodes}, - } - action_id = action_mod.Action.create(ctx, db_cluster.id, - consts.CLUSTER_REPLACE_NODES, - **kwargs) - dispatcher.start_action() - LOG.info("Cluster replace nodes action queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def cluster_resize(self, ctx, req): - """Adjust cluster size parameters. - - :param ctx: An instance of the request context. - :param req: An instance of the ClusterResizeRequest object. - - :return: A dict containing the ID of an action fired. 
- """ - adj_type = None - number = None - min_size = None - max_size = None - min_step = None - strict = True - - if (req.obj_attr_is_set(consts.ADJUSTMENT_TYPE) and - req.adjustment_type is not None): - adj_type = req.adjustment_type - if not req.obj_attr_is_set(consts.ADJUSTMENT_NUMBER): - msg = _('Missing number value for size adjustment.') - raise exception.BadRequest(msg=msg) - - if (req.adjustment_type == consts.EXACT_CAPACITY and - req.number < 0): - msg = _("The 'number' must be non-negative integer " - "for adjustment type '%s'.") % adj_type - raise exception.BadRequest(msg=msg) - - if adj_type == consts.CHANGE_IN_PERCENTAGE: - # min_step is only used (so checked) for this case - if req.obj_attr_is_set(consts.ADJUSTMENT_MIN_STEP): - min_step = req.min_step - number = req.number - else: - number = int(req.number) - else: - if (req.obj_attr_is_set(consts.ADJUSTMENT_NUMBER) and - req.number is not None): - msg = _('Missing adjustment_type value for size adjustment.') - LOG.error(msg) - raise exception.BadRequest(msg=msg) - - if req.obj_attr_is_set(consts.ADJUSTMENT_MIN_SIZE): - min_size = req.min_size - if req.obj_attr_is_set(consts.ADJUSTMENT_MAX_SIZE): - max_size = req.max_size - if req.obj_attr_is_set(consts.ADJUSTMENT_STRICT): - strict = req.strict - - db_cluster = co.Cluster.find(ctx, req.identity) - current = node_obj.Node.count_by_cluster(ctx, db_cluster.id) - if adj_type is not None: - desired = su.calculate_desired(current, adj_type, number, min_step) - else: - desired = None - - res = su.check_size_params(db_cluster, desired, min_size, max_size, - strict) - if res: - raise exception.BadRequest(msg=res) - - LOG.info("Resizing cluster '%(cluster)s': type=%(adj_type)s, " - "number=%(number)s, min_size=%(min_size)s, " - "max_size=%(max_size)s, min_step=%(min_step)s, " - "strict=%(strict)s.", - {'cluster': req.identity, 'adj_type': adj_type, - 'number': number, 'min_size': min_size, - 'max_size': max_size, 'min_step': min_step, - 'strict': strict}) - - params = { - 'name': 'cluster_resize_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': { - consts.ADJUSTMENT_TYPE: adj_type, - consts.ADJUSTMENT_NUMBER: number, - consts.ADJUSTMENT_MIN_SIZE: min_size, - consts.ADJUSTMENT_MAX_SIZE: max_size, - consts.ADJUSTMENT_MIN_STEP: min_step, - consts.ADJUSTMENT_STRICT: strict - } - } - action_id = action_mod.Action.create( - ctx, db_cluster.id, consts.CLUSTER_RESIZE, **params) - dispatcher.start_action() - LOG.info("Cluster resize action queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def cluster_scale_out(self, ctx, req): - """Inflate the size of a cluster by the given number (optional). - - :param ctx: Request context for the call. - :param req: An instance of the ClusterScaleOutRequest object. - :return: A dict with the ID of the action fired. 
- """ - db_cluster = co.Cluster.find(ctx, req.identity) - if req.obj_attr_is_set('count'): - if req.count == 0: - err = _("Count for scale-out request cannot be 0.") - raise exception.BadRequest(msg=err) - - err = su.check_size_params(db_cluster, - db_cluster.desired_capacity + req.count) - if err: - raise exception.BadRequest(msg=err) - - LOG.info('Scaling out cluster %(name)s by %(delta)s nodes', - {'name': req.identity, 'delta': req.count}) - inputs = {'count': req.count} - else: - LOG.info('Scaling out cluster %s', db_cluster.name) - inputs = {} - - params = { - 'name': 'cluster_scale_out_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': inputs, - } - action_id = action_mod.Action.create(ctx, db_cluster.id, - consts.CLUSTER_SCALE_OUT, - **params) - dispatcher.start_action() - LOG.info("Cluster Scale out action queued: %s", action_id) - - return {'action': action_id} - - @request_context - def cluster_scale_in(self, ctx, req): - """Deflate the size of a cluster by given number (optional). - - :param ctx: Request context for the call. - :param req: An instance of the ClusterScaleInRequest object. - :return: A dict with the ID of the action fired. - """ - db_cluster = co.Cluster.find(ctx, req.identity) - if req.obj_attr_is_set('count'): - if req.count == 0: - err = _("Count for scale-in request cannot be 0.") - raise exception.BadRequest(msg=err) - - err = su.check_size_params(db_cluster, - db_cluster.desired_capacity - req.count) - if err: - raise exception.BadRequest(msg=err) - - LOG.info('Scaling in cluster %(name)s by %(delta)s nodes', - {'name': req.identity, 'delta': req.count}) - inputs = {'count': req.count} - else: - LOG.info('Scaling in cluster %s', db_cluster.name) - inputs = {} - - params = { - 'name': 'cluster_scale_in_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': inputs, - } - action_id = action_mod.Action.create(ctx, db_cluster.id, - consts.CLUSTER_SCALE_IN, - **params) - dispatcher.start_action() - LOG.info("Cluster Scale in action queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def cluster_collect(self, ctx, req): - """Collect a certain attribute across a cluster. - - :param ctx: An instance of the request context. - :param req: An instance of the ClusterCollectRequest object. - :return: A dictionary containing values of attribute collected from all - nodes. - """ - # validate 'path' string and return a parser, - # The function may raise a BadRequest exception. - parser = utils.get_path_parser(req.path) - cluster = co.Cluster.find(ctx, req.identity) - nodes = node_obj.Node.get_all_by_cluster(ctx, cluster.id) - attrs = [] - for node in nodes: - info = node.to_dict() - if node.physical_id and 'details' in req.path: - obj = node_mod.Node.load(ctx, db_node=node) - info['details'] = obj.get_details(ctx) - - matches = [m.value for m in parser.find(info)] - if matches: - attrs.append({'id': node.id, 'value': matches[0]}) - - return {'cluster_attributes': attrs} - - @request_context - def cluster_check(self, ctx, req): - """Check the status of a cluster. - - :param ctx: An instance of the request context. - :param req: An instance of the ClusterCheckRequest object. - :return: A dictionary containing the ID of the action triggered. 
- """ - LOG.info("Checking cluster '%s'.", req.identity) - db_cluster = co.Cluster.find(ctx, req.identity) - # cope with cluster check request from engine internal - if not ctx.user_id or not ctx.project_id: - ctx.user_id = db_cluster.user - ctx.project_id = db_cluster.project - - kwargs = { - 'name': 'cluster_check_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': req.params if req.obj_attr_is_set('params') else {} - } - need_delete = kwargs['inputs'].get('delete_check_action', False) - # delete some records of CLUSTER_CHECK - if need_delete: - action_obj.Action.delete_by_target( - ctx, db_cluster.id, action=[consts.CLUSTER_CHECK], - status=[consts.ACTION_SUCCEEDED, consts.ACTION_FAILED]) - - action_id = action_mod.Action.create(ctx, db_cluster.id, - consts.CLUSTER_CHECK, - **kwargs) - dispatcher.start_action() - LOG.info("Cluster check action queued: %s.", action_id) - - return {'action': action_id} - - def _get_operation_params(self, params): - inputs = {} - - if 'operation' in params: - op_name = params.pop('operation') - if not isinstance(op_name, str): - raise exception.BadRequest( - msg="operation has to be a string") - if op_name.upper() not in consts.RECOVERY_ACTIONS: - msg = ("Operation value '{}' has to be one of the " - "following: {}." - ).format(op_name, - ', '.join(consts.RECOVERY_ACTIONS)) - raise exception.BadRequest(msg=msg) - inputs['operation'] = op_name - - if 'operation_params' in params: - op_params = params.pop('operation_params') - - if (op_name.upper() == consts.RECOVER_REBOOT): - if not isinstance(op_params, dict): - raise exception.BadRequest( - msg="operation_params must be a map") - - if (consts.REBOOT_TYPE in op_params.keys() and - op_params[consts.REBOOT_TYPE].upper() - not in consts.REBOOT_TYPES): - msg = ("Type field '{}' in operation_params has to be " - "one of the following: {}.").format( - op_params[consts.REBOOT_TYPE], - ', '.join(consts.REBOOT_TYPES)) - raise exception.BadRequest(msg=msg) - - inputs['operation_params'] = op_params - - return inputs - - @request_context - def cluster_recover(self, ctx, req): - """Recover a cluster to a healthy status. - - :param ctx: An instance of the request context. - :param req: An instance of a ClusterRecoverRequest object. - :return: A dictionary containing the ID of the action triggered. 
- """ - LOG.info("Recovering cluster '%s'.", req.identity) - db_cluster = co.Cluster.find(ctx, req.identity) - - # cope with cluster check request from engine internal - if not ctx.user_id or not ctx.project_id: - ctx.user_id = db_cluster.user - ctx.project_id = db_cluster.project - - inputs = {} - if req.obj_attr_is_set('params') and req.params: - inputs = self._get_operation_params(req.params) - - if 'check' in req.params: - inputs['check'] = req.params.pop('check') - - if 'check_capacity' in req.params: - inputs['check_capacity'] = req.params.pop('check_capacity') - - if len(req.params): - keys = [str(k) for k in req.params] - msg = _("Action parameter %s is not recognizable.") % keys - raise exception.BadRequest(msg=msg) - - params = { - 'name': 'cluster_recover_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': inputs - } - action_id = action_mod.Action.create(ctx, db_cluster.id, - consts.CLUSTER_RECOVER, **params) - dispatcher.start_action() - LOG.info("Cluster recover action queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def cluster_complete_lifecycle(self, ctx, req): - """Complete lifecycle for a cluster's action token - - :param ctx: Request context for the call. - :param req: An instance of the ClusterCompleteLifecycle object. - :return: A dict with the ID of the action fired. - """ - - LOG.info("Complete lifecycle for %s.", req.lifecycle_action_token) - cluster_action_mod.CompleteLifecycleProc(ctx, - req.lifecycle_action_token) - - return {'action': req.lifecycle_action_token} - - @request_context - def cluster_op(self, ctx, req): - """Perform an operation on the specified cluster. - - :param ctx: An instance of the request context. - :param req: An instance of the ClusterOperationRequest object. - :return: A dictionary containing the ID of the action triggered by the - recover request. - """ - LOG.info("Performing operation '%(o)s' on cluster '%(n)s'.", - {'o': req.operation, 'n': req.identity}) - - db_cluster = co.Cluster.find(ctx, req.identity) - cluster = cluster_mod.Cluster.load(ctx, dbcluster=db_cluster) - profile = cluster.rt['profile'] - if req.operation not in profile.OPERATIONS: - msg = _("The requested operation '%(o)s' is not supported by the " - "profile type '%(t)s'." 
- ) % {'o': req.operation, 't': profile.type} - raise exception.BadRequest(msg=msg) - - if req.obj_attr_is_set('params') and req.params: - params = req.params - try: - profile.OPERATIONS[req.operation].validate(req.params) - except exception.ESchema as ex: - raise exception.BadRequest(msg=str(ex)) - else: - params = {} - - if 'filters' in req and req.filters: - errors = [] - for k in req.filters: - if k not in (consts.NODE_NAME, consts.NODE_PROFILE_ID, - consts.NODE_STATUS, consts.NODE_ROLE): - errors.append(_("Filter key '%s' is unsupported") % k) - if errors: - raise exception.BadRequest(msg='\n'.join(errors)) - node_ids = node_obj.Node.ids_by_cluster(ctx, cluster.id, - filters=req.filters) - else: - node_ids = node_obj.Node.ids_by_cluster(ctx, cluster.id) - - if not node_ids: - msg = _("No node (matching the filter) could be found") - raise exception.BadRequest(msg=msg) - - kwargs = { - 'name': 'cluster_%s_%s' % (req.operation, cluster.id[:8]), - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': { - 'operation': req.operation, - 'params': params, - 'nodes': node_ids, - } - } - action_id = action_mod.Action.create( - ctx, cluster.id, consts.CLUSTER_OPERATION, **kwargs) - dispatcher.start_action() - LOG.info("Cluster operation action is queued: %s.", action_id) - return {'action': action_id} - - @request_context - def node_list(self, ctx, req): - """List node records matching the specified criteria. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeListRequest object. - :return: A list of `Node` object representations. - """ - req.obj_set_defaults() - if not req.project_safe and not ctx.is_admin: - raise exception.Forbidden() - - query = {'project_safe': req.project_safe} - if req.obj_attr_is_set('limit'): - query['limit'] = req.limit - if req.obj_attr_is_set('marker'): - query['marker'] = req.marker - if req.obj_attr_is_set('sort') and req.sort is not None: - query['sort'] = req.sort - if req.obj_attr_is_set('cluster_id') and req.cluster_id: - try: - db_cluster = co.Cluster.find(ctx, req.cluster_id) - except exception.ResourceNotFound: - msg = _("Cannot find the given cluster: %s") % req.cluster_id - raise exception.BadRequest(msg=msg) - query['cluster_id'] = db_cluster.id - - filters = {} - if req.obj_attr_is_set('name'): - filters['name'] = req.name - if req.obj_attr_is_set('status'): - filters['status'] = req.status - if filters: - query['filters'] = filters - - nodes = node_obj.Node.get_all(ctx, **query) - return [node.to_dict() for node in nodes] - - @request_context - def node_create(self, ctx, req): - """Create a node. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeCreateRequestBody object. - :return: A dictionary containing the details about the node and the - ID of the action triggered by this operation. 
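Every mutating RPC in this module ends the same way, which is worth spelling out once: create a READY action against the target, nudge the dispatcher, and return the action ID for the caller to poll.

    # Recurring tail of the mutating RPCs above (shape only, names as in
    # this module):
    #
    #   action_id = action_mod.Action.create(ctx, target_id, action_name,
    #                                        cause=consts.CAUSE_RPC,
    #                                        status=action_mod.Action.READY,
    #                                        inputs=inputs, ...)
    #   dispatcher.start_action()
    #   return {'action': action_id}
    #
    # The caller then polls the actions API with the returned ID until the
    # action reaches SUCCEEDED or FAILED.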
- """ - cluster_id = "" - index = -1 - name_format = "" - req.obj_set_defaults() - - try: - node_profile = profile_obj.Profile.find(ctx, req.profile_id) - except exception.ResourceNotFound as ex: - msg = ex.enhance_msg('specified', ex) - raise exception.BadRequest(msg=msg) - - if req.cluster_id: - try: - db_cluster = co.Cluster.find(ctx, req.cluster_id) - except (exception.ResourceNotFound, - exception.MultipleChoices) as ex: - msg = ex.enhance_msg('specified', ex) - raise exception.BadRequest(msg=msg) - - # Validate profile type - if node_profile.id != db_cluster.profile_id: - cluster_profile = profile_obj.Profile.find( - ctx, db_cluster.profile_id) - if node_profile.type != cluster_profile.type: - msg = _('Node and cluster have different profile type, ' - 'operation aborted.') - raise exception.BadRequest(msg=msg) - - cluster_id = db_cluster.id - name_format = db_cluster.config.get("node.name.format", "") - index = co.Cluster.get_next_index(ctx, cluster_id) - - # we use requested name only when cluster is not specified - if cluster_id == "": - node_name = req.name - else: - node_name = utils.format_node_name(name_format, db_cluster, index) - - if CONF.name_unique: - if node_obj.Node.get_by_name(ctx, node_name): - msg = _("The node named (%(name)s) already exists." - ) % {"name": node_name} - raise exception.BadRequest(msg=msg) - - LOG.info("Creating node '%s'.", node_name) - - # Create a node instance - values = { - 'name': node_name, - 'profile_id': node_profile.id, - 'cluster_id': cluster_id or '', - 'physical_id': None, - 'index': index, - 'role': req.role or '', - 'metadata': req.metadata or {}, - 'status': consts.NS_INIT, - 'status_reason': 'Initializing', - 'data': {}, - 'dependents': {}, - 'init_at': timeutils.utcnow(True), - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - } - node = node_obj.Node.create(ctx, values) - - params = { - 'name': 'node_create_%s' % node.id[:8], - 'cluster_id': cluster_id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - } - action_id = action_mod.Action.create(ctx, node.id, - consts.NODE_CREATE, **params) - dispatcher.start_action() - LOG.info("Node create action queued: %s.", action_id) - - result = node.to_dict() - result['action'] = action_id - return result - - @request_context - def node_get(self, ctx, req): - """Retrieve the node specified. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeGetRequest object. - :return: A dictionary containing the detailed information about a node - or an exception of `ResourceNotFound` if no matching node - could be found. - """ - req.obj_set_defaults() - node = node_obj.Node.find(ctx, req.identity) - res = node.to_dict() - if req.show_details and node.physical_id: - obj = node_mod.Node.load(ctx, db_node=node) - res['details'] = obj.get_details(ctx) - return res - - @request_context - def node_update(self, ctx, req): - """Update a node with new property values. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeUpdateRequest object. - :return: A dictionary containing the updated representation of the - node along with the ID of the action triggered by this - request. 
- """ - LOG.info("Updating node '%s'.", req.identity) - - node = node_obj.Node.find(ctx, req.identity) - inputs = {} - if req.obj_attr_is_set('profile_id') and req.profile_id is not None: - try: - db_profile = profile_obj.Profile.find(ctx, req.profile_id) - except exception.ResourceNotFound as ex: - msg = ex.enhance_msg('specified', ex) - raise exception.BadRequest(msg=msg) - profile_id = db_profile.id - - # check if profile_type matches - old_profile = profile_obj.Profile.find(ctx, node.profile_id) - if old_profile.type != db_profile.type: - msg = _('Cannot update a node to a different profile type, ' - 'operation aborted.') - raise exception.BadRequest(msg=msg) - - if profile_id != old_profile.id: - inputs['new_profile_id'] = profile_id - - if req.obj_attr_is_set('name') and req.name: - if req.name != node.name: - inputs['name'] = req.name - if req.obj_attr_is_set('role') and req.role != node.role: - inputs['role'] = req.role - if req.obj_attr_is_set('metadata'): - if req.metadata != node.metadata: - inputs['metadata'] = req.metadata - if req.obj_attr_is_set('tainted'): - if req.tainted != node.tainted: - inputs['tainted'] = req.tainted - - if not inputs: - msg = _("No property needs an update.") - raise exception.BadRequest(msg=msg) - - params = { - 'name': 'node_update_%s' % node.id[:8], - 'cluster_id': node.cluster_id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': inputs, - } - action_id = action_mod.Action.create(ctx, node.id, consts.NODE_UPDATE, - **params) - dispatcher.start_action() - LOG.info("Node update action is queued: %s.", action_id) - - resp = node.to_dict() - resp['action'] = action_id - - return resp - - @request_context - def node_delete(self, ctx, req): - """Delete the specified node. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeDeleteRequest object. - :return: A dictionary containing the ID of the action triggered by - this request. - """ - LOG.info('Deleting node %s', req.identity) - - node = node_obj.Node.find(ctx, req.identity) - - force = False - if req.obj_attr_is_set(consts.NODE_DELETE_FORCE): - force = req.force - - if (not force and - node.status in [consts.NS_CREATING, - consts.NS_UPDATING, - consts.NS_DELETING, - consts.NS_RECOVERING]): - raise exception.ActionInProgress(type='node', id=req.identity, - status=node.status) - - nodes = node.dependents.get('nodes', None) - if nodes is not None and len(nodes) > 0: - reason = _("still depended by other clusters and/or nodes") - raise exception.ResourceInUse(type='node', id=req.identity, - reason=reason) - - params = { - 'name': 'node_delete_%s' % node.id[:8], - 'cluster_id': node.cluster_id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - } - action_id = action_mod.Action.create(ctx, node.id, - consts.NODE_DELETE, **params) - dispatcher.start_action() - LOG.info("Node delete action is queued: %s.", action_id) - - return {'action': action_id} - - def _node_adopt_preview(self, ctx, req): - """Preview version of node adoption (internal version). - - :param ctx: An instance of the request context. - :param req: An instance of the NodeAdoptPreviewRequest or the - NodeAdoptRequest object. - :returns: A tuple containing the profile class and the spec for the - node that can be adopted. - :raises: BadRequest(404) if profile type not found; or - InternalServerError(500) if profile operation failed. 
- """ - - # Apply default settings on the request - req.obj_set_defaults() - - try: - profile_cls = environment.global_env().get_profile(req.type) - except exception.ResourceNotFound as ex: - raise exception.BadRequest(msg=str(ex)) - - # NOTE: passing in context to avoid loading runtime data - temp_node = node_mod.Node('adopt', 'TBD', physical_id=req.identity, - context=ctx) - # TODO(Qiming): return node status and created timestamp - # TODO(Qiming): pass 'preview' into 'adopt_node' so that we don't - # blindly create snapshots. - spec = profile_base.Profile.adopt_node(ctx, temp_node, req.type, - overrides=req.overrides, - snapshot=req.snapshot) - if 'Error' in spec: - err = '%s: %s' % (spec['Error']['code'], spec['Error']['message']) - raise exception.ProfileOperationFailed(message=err) - - parts = req.type.split('-') - res = { - 'type': parts[0], - 'version': parts[1], - 'properties': spec - } - return profile_cls, res - - @request_context - def node_adopt_preview(self, ctx, req): - """Preview a node adoption operation. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeAdoptPreviewRequest object. - :returns: A dict containing the properties of a spec. - """ - LOG.info("Adopting node '%s' (preview).", req.identity) - _, spec = self._node_adopt_preview(ctx, req) - return {'node_preview': spec} - - @request_context - def node_adopt(self, ctx, req): - """Adopt a node into senlin's management. - - :param ctx: An instance of the request context. - :param req: An NodeAdoptRequest object. - :returns: A dict containing information about the node created by - adopting an existing physical resource. - """ - LOG.info("Adopting node '%s'.", req.identity) - - # check name uniqueness if needed - if req.obj_attr_is_set('name') and req.name: - name = req.name - if CONF.name_unique and node_obj.Node.get_by_name(ctx, name): - msg = _("The node named (%s) already exists.") % name - raise exception.BadRequest(msg=msg) - else: - name = 'node-' + utils.random_name() - - # create spec using preview - profile_cls, spec = self._node_adopt_preview(ctx, req) - # create profile - profile = profile_cls.create(ctx, "prof-%s" % name, spec) - if req.obj_attr_is_set('metadata'): - metadata = req.metadata - else: - metadata = {} - # Create a node instance - values = { - 'name': name, - 'data': {}, - 'dependents': {}, - 'profile_id': profile.id, - 'cluster_id': '', - 'physical_id': req.identity, - 'index': -1, - 'role': '', - 'metadata': metadata, - # TODO(Qiming): Set node status properly - 'status': consts.NS_ACTIVE, - 'status_reason': 'Node adopted successfully', - 'init_at': timeutils.utcnow(True), - 'created_at': timeutils.utcnow(True), - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - } - node = node_obj.Node.create(ctx, values) - - # TODO(Qiming): set cluster_node_id metadata - LOG.info("Adopted node '%(rid)s' as '%(id)s'.", - {'rid': req.identity, 'id': node.id}) - return node.to_dict() - - @request_context - def node_check(self, ctx, req): - """Check the health status of specified node. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeCheckRequest object. - :return: A dictionary containing the ID of the action triggered by - this request. 
- """ - LOG.info("Checking node '%s'.", req.identity) - - db_node = node_obj.Node.find(ctx, req.identity) - - kwargs = { - 'name': 'node_check_%s' % db_node.id[:8], - 'cluster_id': db_node.cluster_id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY - } - if req.obj_attr_is_set('params') and req.params: - kwargs['inputs'] = req.params - action_id = action_mod.Action.create(ctx, db_node.id, - consts.NODE_CHECK, **kwargs) - dispatcher.start_action() - LOG.info("Node check action is queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def node_recover(self, ctx, req): - """Recover the specified node. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeRecoverRequest object. - :return: A dictionary containing the ID of the action triggered by the - recover request. - """ - LOG.info("Recovering node '%s'.", req.identity) - - db_node = node_obj.Node.find(ctx, req.identity) - - kwargs = { - 'name': 'node_recover_%s' % db_node.id[:8], - 'cluster_id': db_node.cluster_id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': {} - } - if req.obj_attr_is_set('params') and req.params: - kwargs['inputs'] = self._get_operation_params(req.params) - - if 'check' in req.params: - kwargs['inputs']['check'] = req.params.pop('check') - - if 'delete_timeout' in req.params: - kwargs['inputs']['delete_timeout'] = req.params.pop( - 'delete_timeout') - - if 'force_recreate' in req.params: - kwargs['inputs']['force_recreate'] = req.params.pop( - 'force_recreate') - - if len(req.params): - keys = [str(k) for k in req.params] - msg = _("Action parameter %s is not recognizable." - ) % keys - raise exception.BadRequest(msg=msg) - - action_id = action_mod.Action.create(ctx, db_node.id, - consts.NODE_RECOVER, **kwargs) - dispatcher.start_action() - LOG.info("Node recover action is queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def node_op(self, ctx, req): - """Perform an operation on the specified node. - - :param ctx: An instance of the request context. - :param req: An instance of the NodeOperationRequest object. - :return: A dictionary containing the ID of the action triggered by the - operation request. - """ - LOG.info("Performing operation '%(o)s' on node '%(n)s'.", - {'o': req.operation, 'n': req.identity}) - - db_node = node_obj.Node.find(ctx, req.identity) - node = node_mod.Node.load(ctx, db_node=db_node) - - profile = node.rt['profile'] - if req.operation not in profile.OPERATIONS: - msg = _("The requested operation '%(o)s' is not supported by the " - "profile type '%(t)s'." - ) % {'o': req.operation, 't': profile.type} - raise exception.BadRequest(msg=msg) - - params = {} - if req.obj_attr_is_set('params') and req.params: - params = req.params - try: - profile.OPERATIONS[req.operation].validate(req.params) - except exception.ESchema as ex: - raise exception.BadRequest(msg=str(ex)) - - kwargs = { - 'name': 'node_%s_%s' % (req.operation, db_node.id[:8]), - 'cluster_id': db_node.cluster_id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': { - 'operation': req.operation, - 'params': params - } - } - action_id = action_mod.Action.create(ctx, db_node.id, - consts.NODE_OPERATION, **kwargs) - dispatcher.start_action() - LOG.info("Node operation action is queued: %s.", action_id) - return {'action': action_id} - - @request_context - def cluster_policy_list(self, ctx, req): - """List cluster-policy bindings given the cluster identity. 
- - :param ctx: An instance of the request context. - :param req: An instance of the ClusterPolicyListRequest object. - :return: A list containing dictionaries each representing a binding. - """ - sort = None - if req.obj_attr_is_set('sort'): - sort = req.sort - filters = {} - if req.obj_attr_is_set('policy_name'): - filters['policy_name'] = req.policy_name - if req.obj_attr_is_set('policy_type'): - filters['policy_type'] = req.policy_type - if req.obj_attr_is_set('enabled'): - filters['enabled'] = req.enabled - - db_cluster = co.Cluster.find(ctx, req.identity) - bindings = cp_obj.ClusterPolicy.get_all( - ctx, db_cluster.id, filters=filters, sort=sort) - - return [binding.to_dict() for binding in bindings] - - @request_context - def cluster_policy_get(self, ctx, req): - """Get the binding record giving the cluster and policy identity. - - :param ctx: An instance of request context. - :param req: An instance of the ClusterPolicyGetRequest object. - :return: A dictionary containing the binding record, or raises an - exception of ``PolicyBindingNotFound``. - """ - identity = req.identity - policy_id = req.policy_id - db_cluster = co.Cluster.find(ctx, identity) - db_policy = policy_obj.Policy.find(ctx, policy_id) - - binding = cp_obj.ClusterPolicy.get(ctx, db_cluster.id, db_policy.id) - if binding is None: - raise exception.PolicyBindingNotFound(policy=policy_id, - identity=identity) - - return binding.to_dict() - - @request_context - def cluster_policy_attach(self, ctx, req): - """Attach a policy to the specified cluster. - - This is done via an action because a cluster lock is needed. - - :param ctx: An instance of request context. - :param req: An instance of the ClusterAttachPolicyRequest object. - :return: A dictionary contains the ID of the action fired. - """ - LOG.info("Attaching policy (%(policy)s) to cluster " - "(%(cluster)s).", - {'policy': req.policy_id, 'cluster': req.identity}) - - db_cluster = co.Cluster.find(ctx, req.identity) - try: - db_policy = policy_obj.Policy.find(ctx, req.policy_id) - except exception.ResourceNotFound as ex: - msg = ex.enhance_msg('specified', ex) - raise exception.BadRequest(msg=msg) - - req.obj_set_defaults() - - params = { - 'name': 'attach_policy_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': { - 'policy_id': db_policy.id, - 'enabled': req.enabled, - } - } - action_id = action_mod.Action.create(ctx, db_cluster.id, - consts.CLUSTER_ATTACH_POLICY, - **params) - dispatcher.start_action() - LOG.info("Policy attach action queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def cluster_policy_detach(self, ctx, req): - """Detach a policy from the specified cluster. - - This is done via an action because cluster lock is needed. - - :param ctx: An instance of request context. - :param req: An instance of the ClusterDetachPolicyRequest object. - :return: A dictionary contains the ID of the action fired. 
- """ - LOG.info("Detaching policy '%(policy)s' from cluster " - "'%(cluster)s'.", - {'policy': req.policy_id, 'cluster': req.identity}) - - db_cluster = co.Cluster.find(ctx, req.identity) - try: - db_policy = policy_obj.Policy.find(ctx, req.policy_id) - except exception.ResourceNotFound as ex: - msg = ex.enhance_msg('specified', ex) - raise exception.BadRequest(msg=msg) - - binding = cp_obj.ClusterPolicy.get(ctx, db_cluster.id, db_policy.id) - if binding is None: - msg = _("The policy '%(p)s' is not attached to the specified " - "cluster '%(c)s'." - ) % {'p': req.policy_id, 'c': req.identity} - raise exception.BadRequest(msg=msg) - - params = { - 'name': 'detach_policy_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': {'policy_id': db_policy.id}, - } - action_id = action_mod.Action.create(ctx, db_cluster.id, - consts.CLUSTER_DETACH_POLICY, - **params) - dispatcher.start_action() - LOG.info("Policy detach action queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def cluster_policy_update(self, ctx, req): - """Update an existing policy binding on a cluster. - - This is done via an action because cluster lock is needed. - - :param context: An instance of request context. - :param req: An instance of the ClusterUpdatePolicyRequest object. - :return: A dictionary contains the ID of the action fired. - """ - LOG.info("Updating policy '%(policy)s' on cluster '%(cluster)s.'", - {'policy': req.policy_id, 'cluster': req.identity}) - - db_cluster = co.Cluster.find(ctx, req.identity) - try: - db_policy = policy_obj.Policy.find(ctx, req.policy_id) - except exception.ResourceNotFound as ex: - msg = ex.enhance_msg('specified', ex) - raise exception.BadRequest(msg=msg) - - binding = cp_obj.ClusterPolicy.get(ctx, db_cluster.id, db_policy.id) - if binding is None: - msg = _("The policy '%(p)s' is not attached to the specified " - "cluster '%(c)s'." - ) % {'p': req.policy_id, 'c': req.identity} - raise exception.BadRequest(msg=msg) - - inputs = {'policy_id': db_policy.id} - if req.obj_attr_is_set('enabled'): - inputs['enabled'] = req.enabled - - params = { - 'name': 'update_policy_%s' % db_cluster.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': inputs - } - action_id = action_mod.Action.create(ctx, db_cluster.id, - consts.CLUSTER_UPDATE_POLICY, - **params) - dispatcher.start_action() - LOG.info("Policy update action queued: %s.", action_id) - - return {'action': action_id} - - @request_context - def action_list(self, ctx, req): - """List action records matching the specified criteria. - - :param ctx: An instance of the request context. - :param req: An instance of the ActionListRequest object. - :return: A list of `Action` object representations. 
- """ - - req.obj_set_defaults() - if not req.project_safe and not ctx.is_admin: - raise exception.Forbidden() - - query = {'project_safe': req.project_safe} - if req.obj_attr_is_set('limit'): - query['limit'] = req.limit - if req.obj_attr_is_set('marker'): - query['marker'] = req.marker - if req.obj_attr_is_set('sort') and req.sort is not None: - query['sort'] = req.sort - - filters = {} - if req.obj_attr_is_set('name'): - filters['name'] = req.name - # add filter with cluster_id - if req.obj_attr_is_set('cluster_id'): - cluster_ids = [] - for cid in req.cluster_id: - try: - cluster = co.Cluster.find(ctx, cid) - cluster_ids.append(cluster.id) - except exception.ResourceNotFound: - return [] - if len(cluster_ids) > 0: - filters['cluster_id'] = cluster_ids - if req.obj_attr_is_set('action'): - filters['action'] = req.action - if req.obj_attr_is_set('target'): - filters['target'] = req.target - if req.obj_attr_is_set('status'): - filters['status'] = req.status - if filters: - query['filters'] = filters - - actions = action_obj.Action.get_all(ctx, **query) - - return [a.to_dict() for a in actions] - - @request_context - def action_create(self, ctx, req): - """Create an action with given details. - - :param ctx: An instance of the request context. - :param req: An instance of the ActionCreateRequestBody object. - :return: A dictionary containing the details about the action and the - ID of the action triggered by this operation. - """ - LOG.info("Creating action '%s'.", req.name) - - req.obj_set_defaults() - try: - target = co.Cluster.find(ctx, req.cluster_id) - except exception.ResourceNotFound: - msg = _("Cannot find the given cluster: %s") % req.cluster_id - raise exception.BadRequest(msg=msg) - - # Create an action instance - params = { - 'name': req.name, - 'cluster_id': target.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': req.inputs or {}, - } - action_id = action_mod.Action.create(ctx, target.id, req.action, - **params) - - # TODO(Anyone): Uncomment this to notify the dispatcher - # dispatcher.start_action(action_id=action.id) - - LOG.info("Action '%(name)s' is created: %(id)s.", - {'name': req.name, 'id': action_id}) - return {'action': action_id} - - @request_context - def action_get(self, ctx, req): - """Retrieve the action specified. - - :param ctx: An instance of the request context. - :param req: An instance of the ActionGetRequest object. - :return: A dictionary containing the detailed information about a - action or an exception of `ResourceNotFound` if no matching - action could be found. - """ - action = action_obj.Action.find(ctx, req.identity) - return action.to_dict() - - @request_context - def action_delete(self, ctx, req): - """Delete the specified action object. - - :param ctx: An instance of the request context. - :param req: An instance of the ActionDeleteRequest object. - :return: None if deletion was successful, or an exception of type - `ResourceInUse`. - """ - db_action = action_obj.Action.find(ctx, req.identity) - LOG.info("Deleting action '%s'.", req.identity) - try: - action_mod.Action.delete(ctx, db_action.id) - except exception.EResourceBusy: - reason = _("still in one of WAITING, RUNNING or SUSPENDED state") - raise exception.ResourceInUse(type='action', id=req.identity, - reason=reason) - - LOG.info("Action '%s' is deleted.", req.identity) - - @request_context - def action_update(self, ctx, req): - """Update the specified action object. - - :param ctx: An instance of the request context. 
- :param req: An instance of the ActionUpdateRequest object. - :return: None if update was successful, or an exception of type - `BadRequest`. - """ - # Only allow cancellation of actions at this time. - if req.status == consts.ACTION_CANCELLED: - action = action_mod.Action.load(ctx, req.identity, - project_safe=False) - if req.force: - action.force_cancel() - else: - LOG.info("Signaling action '%s' to Cancel.", req.identity) - action.signal_cancel() - else: - msg = ("Unknown status %(status)s for action %(action)s" % - {"status": req.status, "action": req.identity}) - raise exception.BadRequest(msg=msg) - - @request_context - def receiver_list(self, ctx, req): - """List receivers matching the specified criteria. - - :param ctx: An instance of the request context. - :param req: An instance of the ReceiverListRequest object. - :return: A list of `Receiver` object representations. - """ - req.obj_set_defaults() - if not req.project_safe and not ctx.is_admin: - raise exception.Forbidden() - - query = {'project_safe': req.project_safe} - if req.obj_attr_is_set('limit'): - query['limit'] = req.limit - if req.obj_attr_is_set('marker'): - query['marker'] = req.marker - if req.obj_attr_is_set('sort') and req.sort is not None: - query['sort'] = req.sort - - filters = {} - if req.obj_attr_is_set('name'): - filters['name'] = req.name - if req.obj_attr_is_set('type'): - filters['type'] = req.type - if req.obj_attr_is_set('action'): - filters['action'] = req.action - if req.obj_attr_is_set('cluster_id'): - filters['cluster_id'] = req.cluster_id - if req.obj_attr_is_set('user'): - filters['user'] = req.user - if filters: - query['filters'] = filters - - receivers = receiver_obj.Receiver.get_all(ctx, **query) - return [r.to_dict() for r in receivers] - - @request_context - def receiver_create(self, ctx, req): - """Create a receiver. - - :param ctx: An instance of the request context. - :param req: An instance of the ReceiverCreateRequestBody object. - :return: A dictionary containing the details about the receiver - created. 
- """ - if CONF.name_unique: - if receiver_obj.Receiver.get_by_name(ctx, req.name): - msg = _("A receiver named '%s' already exists.") % req.name - raise exception.BadRequest(msg=msg) - - LOG.info("Creating %(t)s receiver %(n)s.", - {'n': req.name, 't': req.type}) - - req.obj_set_defaults() - # Sanity check for webhook target - cluster = None - action = None - if req.type == consts.RECEIVER_WEBHOOK: - if not req.obj_attr_is_set('cluster_id') or req.cluster_id is None: - msg = _("Cluster identity is required for creating " - "webhook receiver.") - raise exception.BadRequest(msg=msg) - - if not req.obj_attr_is_set('action') or req.action is None: - msg = _("Action name is required for creating webhook " - "receiver.") - raise exception.BadRequest(msg=msg) - action = req.action - invalid_actions = [consts.CLUSTER_CREATE] - if action in invalid_actions: - msg = _("Action name cannot be any of %s.") % invalid_actions - raise exception.BadRequest(msg=msg) - - # Check whether cluster identified by cluster_id does exist - try: - cluster = co.Cluster.find(ctx, req.cluster_id) - except (exception.ResourceNotFound, - exception.MultipleChoices) as ex: - msg = ex.enhance_msg('referenced', ex) - raise exception.BadRequest(msg=msg) - - # permission checking - if not ctx.is_admin and ctx.user_id != cluster.user: - raise exception.Forbidden() - - kwargs = { - 'name': req.name, - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - 'params': req.params - } - - receiver = receiver_mod.Receiver.create(ctx, req.type, cluster, - action, **kwargs) - LOG.info("Receiver (%(n)s) is created: %(i)s.", - {'n': req.name, 'i': receiver.id}) - - return receiver.to_dict() - - @request_context - def receiver_get(self, ctx, req): - """Get the details about a receiver. - - :param ctx: An instance of the request context. - :param req: An instance of the ReceiverGetRequest object. - :return: A dictionary containing the details about a receiver or - an exception `ResourceNotFound` if no matching object found. - """ - # NOTE: Temporary code to make tempest tests about webhook_trigger - # pass, will remove in latter patches. - kwargs = {} - if ctx.is_admin is True: - kwargs['project_safe'] = False - - receiver = receiver_obj.Receiver.find(ctx, req.identity, **kwargs) - return receiver.to_dict() - - @request_context - def receiver_update(self, ctx, req): - """Update the properties of a given receiver - - :param ctx: An instance of request context. - :param req: An instance of the ReceiverUpdateRequest. - :returns: A dictionary containing the receiver details of the updated - receiver, or an exception `ResourceNotFound` if no matching - receiver is found. 
- """ - LOG.info("Updating receiver '%(id)s'.", {'id': req.identity}) - db_receiver = receiver_obj.Receiver.find(ctx, req.identity) - receiver = receiver_mod.Receiver.load(ctx, receiver_obj=db_receiver) - changed = False - if (req.obj_attr_is_set('name') and req.name is not None): - if req.name != receiver.name: - receiver.name = req.name - changed = True - if (req.obj_attr_is_set('action') and req.action is not None): - if req.action != receiver.action: - receiver.action = req.action - changed = True - if (req.obj_attr_is_set('params') and req.params is not None): - if req.params != receiver.params: - receiver.params = req.params - changed = True - if changed: - receiver.store(ctx, update=True) - else: - msg = _("No property needs an update.") - raise exception.BadRequest(msg=msg) - LOG.info("Receiver '%(id)s' is updated.", {'id': req.identity}) - return receiver.to_dict() - - @request_context - def receiver_delete(self, ctx, req): - """Delete the specified receiver. - - :param ctx: An instance of the request context. - :param req: An instance of the ReceiverDeleteRequest object. - :return: None if successfully deleted the receiver or an exception of - `ResourceNotFound` if the object could not be found. - """ - db_receiver = receiver_obj.Receiver.find(ctx, req.identity) - LOG.info("Deleting receiver %s.", req.identity) - receiver_mod.Receiver.delete(ctx, db_receiver.id) - LOG.info("Receiver %s is deleted.", req.identity) - - @request_context - def receiver_notify(self, ctx, req): - """Handle notification to specified receiver. - - :param ctx: An instance of the request context. - :param req: An instance of the ReceiverNotifyRequest object. - """ - db_receiver = receiver_obj.Receiver.find(ctx, req.identity) - # permission checking - if not ctx.is_admin and ctx.user_id != db_receiver.user: - raise exception.Forbidden() - - # Receiver type check - if db_receiver.type != consts.RECEIVER_MESSAGE: - msg = _("Notifying non-message receiver is not allowed.") - raise exception.BadRequest(msg=msg) - - LOG.info("Received notification to receiver %s.", req.identity) - receiver = receiver_mod.Receiver.load(ctx, - receiver_obj=db_receiver, - project_safe=True) - receiver.notify(ctx) - - @request_context - def webhook_trigger(self, ctx, req): - """trigger the webhook. - - :param ctx: An instance of the request context. - :param req: An instance of the WebhookTriggerRequest object. - :return: A dictionary contains the ID of the action fired. 
- """ - identity = req.identity - if hasattr(req.body, 'params'): - # API version < 1.10 - params = req.body.params - else: - params = req.body - - LOG.info("Triggering webhook (%s)", identity) - receiver = receiver_obj.Receiver.find(ctx, identity) - - try: - db_cluster = co.Cluster.find(ctx, receiver.cluster_id) - except (exception.ResourceNotFound, exception.MultipleChoices) as ex: - msg = ex.enhance_msg('referenced', ex) - raise exception.BadRequest(msg=msg) - - data = copy.deepcopy(receiver.params) - if data is None: - data = {} - if params: - data.update(params) - - kwargs = { - 'name': 'webhook_%s' % receiver.id[:8], - 'cluster_id': db_cluster.id, - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': data - } - - action_id = action_mod.Action.create(ctx, db_cluster.id, - receiver.action, **kwargs) - dispatcher.start_action() - LOG.info("Webhook %(w)s triggered with action queued: %(a)s.", - {'w': identity, 'a': action_id}) - - return {'action': action_id} - - @request_context - def event_list(self, ctx, req): - """List event records matching the specified criteria. - - :param ctx: An instance of the request context. - :param req: An instance of the EventListRequest object. - :return: A list of `Event` object representations. - """ - - req.obj_set_defaults() - if not req.project_safe and not ctx.is_admin: - raise exception.Forbidden() - - query = {'project_safe': req.project_safe} - if req.obj_attr_is_set('limit'): - query['limit'] = req.limit - if req.obj_attr_is_set('marker'): - query['marker'] = req.marker - if req.obj_attr_is_set('sort') and req.sort is not None: - query['sort'] = req.sort - - filters = {} - if req.obj_attr_is_set('oid'): - filters['oid'] = req.oid - if req.obj_attr_is_set('oname'): - filters['oname'] = req.oname - if req.obj_attr_is_set('otype'): - filters['otype'] = req.otype - if req.obj_attr_is_set('action'): - filters['action'] = req.action - if req.obj_attr_is_set('level'): - filters['level'] = req.level - if req.obj_attr_is_set('cluster_id'): - cluster_ids = [] - for cid in req.cluster_id: - try: - cluster = co.Cluster.find(ctx, cid) - cluster_ids.append(cluster.id) - except exception.ResourceNotFound: - return [] - if len(cluster_ids) > 0: - filters['cluster_id'] = cluster_ids - if filters: - query['filters'] = filters - - if filters and consts.EVENT_LEVEL in filters: - value = filters.pop(consts.EVENT_LEVEL) - value = utils.parse_level_values(value) - if value is not None: - filters[consts.EVENT_LEVEL] = value - - all_events = event_obj.Event.get_all(ctx, **query) - - results = [] - for event in all_events: - evt = event.as_dict() - level = utils.level_from_number(evt['level']) - evt['level'] = level - results.append(evt) - - return results - - @request_context - def event_get(self, ctx, req): - """Retrieve the event specified. - - :param ctx: An instance of the request context. - :param req: An instance of the EventGetRequest object. - :return: A dictionary containing the detailed information about a - event or an exception of `ResourceNotFound` if no matching - event could be found. 
- """ - - db_event = event_obj.Event.find(ctx, req.identity) - evt = db_event.as_dict() - level = utils.level_from_number(evt['level']) - evt['level'] = level - - return evt diff --git a/senlin/conf/__init__.py b/senlin/conf/__init__.py deleted file mode 100644 index b38a2b7c9..000000000 --- a/senlin/conf/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from senlin.conf import api -from senlin.conf import authentication -from senlin.conf import base -from senlin.conf import conductor -from senlin.conf import dispatchers -from senlin.conf import engine -from senlin.conf import health_manager -from senlin.conf import notification -from senlin.conf import receiver -from senlin.conf import revision -from senlin.conf import zaqar - -CONF = cfg.CONF - -api.register_opts(CONF) -authentication.register_opts(CONF) -base.register_opts(CONF) -conductor.register_opts(CONF) -dispatchers.register_opts(CONF) -engine.register_opts(CONF) -health_manager.register_opts(CONF) -notification.register_opts(CONF) -receiver.register_opts(CONF) -revision.register_opts(CONF) -zaqar.register_opts(CONF) diff --git a/senlin/conf/api.py b/senlin/conf/api.py deleted file mode 100644 index 8f0358597..000000000 --- a/senlin/conf/api.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import cfg - -from senlin.common.i18n import _ - - -API_GROUP = cfg.OptGroup('senlin_api') -API_OPTS = [ - cfg.IPOpt('bind_host', default='0.0.0.0', - help=_('Address to bind the server. Useful when ' - 'selecting a particular network interface.')), - cfg.PortOpt('bind_port', default=8777, - help=_('The port on which the server will listen.')), - cfg.IntOpt('backlog', default=4096, - help=_("Number of backlog requests " - "to configure the socket with.")), - cfg.StrOpt('cert_file', - help=_("Location of the SSL certificate file " - "to use for SSL mode.")), - cfg.StrOpt('key_file', - help=_("Location of the SSL key file to use " - "for enabling SSL mode.")), - cfg.IntOpt('workers', min=0, default=0, - help=_("Number of workers for Senlin service.")), - cfg.IntOpt('max_header_line', default=16384, - help=_('Maximum line size of message headers to be accepted. ' - 'max_header_line may need to be increased when using ' - 'large tokens (typically those generated by the ' - 'Keystone v3 API with big service catalogs).')), - cfg.IntOpt('tcp_keepidle', default=600, - help=_('The value for the socket option TCP_KEEPIDLE. 
This is ' - 'the time in seconds that the connection must be idle ' - 'before TCP starts sending keepalive probes.')), - cfg.StrOpt('api_paste_config', default="api-paste.ini", - deprecated_group='paste_deploy', - help=_("The API paste config file to use.")), - cfg.BoolOpt('wsgi_keep_alive', default=True, - deprecated_group='eventlet_opts', - help=_("If false, closes the client socket explicitly.")), - cfg.IntOpt('client_socket_timeout', default=900, - deprecated_group='eventlet_opts', - help=_("Timeout for client connections' socket operations. " - "If an incoming connection is idle for this number of " - "seconds it will be closed. A value of '0' indicates " - "waiting forever.")), - cfg.IntOpt('max_json_body_size', default=1048576, - deprecated_group='DEFAULT', - help=_('Maximum raw byte size of JSON request body.')), -] - - -def register_opts(conf): - conf.register_group(API_GROUP) - conf.register_opts(API_OPTS, group=API_GROUP) - - -def list_opts(): - return { - API_GROUP: API_OPTS, - } diff --git a/senlin/conf/authentication.py b/senlin/conf/authentication.py deleted file mode 100644 index 1a2617ccc..000000000 --- a/senlin/conf/authentication.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from keystoneauth1 import loading as ks_loading -from oslo_config import cfg - -from senlin.common.i18n import _ - -AUTHENTICATION_GROUP = cfg.OptGroup('authentication') -AUTHENTICATION_OPTS = [ - cfg.StrOpt('auth_url', default='', - help=_('Complete identity V3 API endpoint.')), - cfg.StrOpt('service_username', default='senlin', - help=_('Senlin service user name.')), - cfg.StrOpt('service_password', default='', secret=True, - help=_('Password specified for the Senlin service user.')), - cfg.StrOpt('service_project_name', default='service', - help=_('Name of the service project.')), - cfg.StrOpt('service_user_domain', default='Default', - help=_('Name of the domain for the service user.')), - cfg.StrOpt('service_project_domain', default='Default', - help=_('Name of the domain for the service project.')), - cfg.BoolOpt('verify_ssl', default=True, - help=_('Verify HTTPS connections.')), - cfg.StrOpt('interface', default='public', - help=_('Interface to use for the API endpoints.')), -] - - -def register_opts(conf): - conf.register_group(AUTHENTICATION_GROUP) - conf.register_opts(AUTHENTICATION_OPTS, group=AUTHENTICATION_GROUP) - ks_loading.register_session_conf_options(cfg.CONF, 'authentication') - - -def list_opts(): - return { - AUTHENTICATION_GROUP: AUTHENTICATION_OPTS - } diff --git a/senlin/conf/base.py b/senlin/conf/base.py deleted file mode 100644 index 385eef6de..000000000 --- a/senlin/conf/base.py +++ /dev/null @@ -1,125 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import socket - -from oslo_config import cfg - -from senlin.common.i18n import _ - -SENLIN_OPTS = [ - cfg.HostAddressOpt('host', - default=socket.gethostname(), - help=_('Name of the engine node. This can be an opaque ' - 'identifier. It is not necessarily a hostname, ' - 'FQDN or IP address.')), - cfg.StrOpt('default_region_name', - help=_('Default region name used to get services endpoints.')), - cfg.IntOpt('max_response_size', - default=524288, - help=_('Maximum raw byte size of data from web response.')), - cfg.ListOpt('notification_topics', - default=['versioned_notifications'], - help=_('Default notification topic.')), -] - -ENGINE_OPTS = [ - cfg.IntOpt('periodic_interval', - default=60, - help=_('Seconds between running periodic tasks.')), - cfg.IntOpt('check_interval_max', - default=3600, - help=_('Maximum seconds between cluster check to be called.')), - cfg.IntOpt('health_check_interval_min', - default=60, - help=_('Minimum seconds between health check to be called.')), - cfg.StrOpt('environment_dir', - default='/etc/senlin/environments', - help=_('The directory to search for environment files.')), - cfg.IntOpt('max_nodes_per_cluster', - default=1000, - help=_('Maximum nodes allowed per top-level cluster.')), - cfg.IntOpt('max_clusters_per_project', - default=100, - help=_('Maximum number of clusters any one project may have' - ' active at one time.')), - cfg.IntOpt('default_action_timeout', - default=3600, - help=_('Timeout in seconds for actions.')), - cfg.IntOpt('default_nova_timeout', - default=600, - help=_('Timeout in seconds for nova API calls.')), - cfg.IntOpt('max_actions_per_batch', - default=0, - help=_('Maximum number of node actions that each engine worker ' - 'can schedule consecutively per batch. 
0 means no ' - 'limit.')), - cfg.IntOpt('batch_interval', - default=3, - help=_('Seconds to pause between scheduling two consecutive ' - 'batches of node actions.')), - cfg.IntOpt('lock_retry_times', - default=3, - help=_('Number of times trying to grab a lock.')), - cfg.IntOpt('lock_retry_interval', - default=10, - help=_('Number of seconds between lock retries.')), - cfg.IntOpt('database_retry_limit', - default=10, - help=_('Number of times retrying a failed operation on the ' - 'database.')), - cfg.IntOpt('database_retry_interval', - default=0.3, - help=_('Initial number of seconds between database retries.')), - cfg.IntOpt('database_max_retry_interval', - default=2, - help=_('Maximum number of seconds between database retries.')), - cfg.IntOpt('engine_life_check_timeout', - default=2, - help=_('RPC timeout for the engine liveness check that is used' - ' for cluster locking.')), - cfg.BoolOpt('name_unique', - default=False, - help=_('Flag to indicate whether to enforce unique names for ' - 'Senlin objects belonging to the same project.')), - cfg.IntOpt('service_down_time', - default=60, - help=_('Maximum time since last check-in for a service to be ' - 'considered up.')), - cfg.ListOpt('trust_roles', - default=[], - help=_('The roles which are delegated to the trustee by the ' - 'trustor when a cluster is created.')), -] - -CLOUD_BACKEND_OPTS = [ - cfg.StrOpt('cloud_backend', default='openstack', - choices=("openstack", "openstack_test"), - help=_('Default cloud backend to use.')), -] - -EVENT_OPTS = [ - cfg.MultiStrOpt("event_dispatchers", default=['database'], - help=_("Event dispatchers to enable.")), -] - - -def register_opts(conf): - conf.register_opts(SENLIN_OPTS) - conf.register_opts(ENGINE_OPTS) - conf.register_opts(CLOUD_BACKEND_OPTS) - conf.register_opts(EVENT_OPTS) - - -def list_opts(): - return { - 'DEFAULT': SENLIN_OPTS + ENGINE_OPTS + CLOUD_BACKEND_OPTS + EVENT_OPTS - } diff --git a/senlin/conf/conductor.py b/senlin/conf/conductor.py deleted file mode 100644 index f326f0704..000000000 --- a/senlin/conf/conductor.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import cfg - -from senlin.common.i18n import _ - - -CONDUCTOR_GROUP = cfg.OptGroup('conductor') -CONDUCTOR_OPTS = [ - cfg.IntOpt('workers', - default=1, - help=_('Number of senlin-conductor processes.')), - cfg.IntOpt('threads', - default=1000, - help=_('Number of senlin-conductor threads.')), -] - - -def register_opts(conf): - conf.register_group(CONDUCTOR_GROUP) - conf.register_opts(CONDUCTOR_OPTS, group=CONDUCTOR_GROUP) - - -def list_opts(): - return { - CONDUCTOR_GROUP: CONDUCTOR_OPTS, - } diff --git a/senlin/conf/dispatchers.py b/senlin/conf/dispatchers.py deleted file mode 100644 index fb8bc6023..000000000 --- a/senlin/conf/dispatchers.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import cfg - -from senlin.common.i18n import _ - -DISPATCHERS_GROUP = cfg.OptGroup('dispatchers') -DISPATCHERS_OPTS = [ - cfg.StrOpt('priority', default='info', - choices=("critical", "error", "warning", "info", "debug"), - help=_("Lowest event priorities to be dispatched.")), - cfg.BoolOpt("exclude_derived_actions", default=True, - help=_("Exclude derived actions from events dumping.")), -] - - -def register_opts(conf): - conf.register_group(DISPATCHERS_GROUP) - conf.register_opts(DISPATCHERS_OPTS, group=DISPATCHERS_GROUP) - - -def list_opts(): - return { - DISPATCHERS_GROUP: DISPATCHERS_OPTS - } diff --git a/senlin/conf/engine.py b/senlin/conf/engine.py deleted file mode 100644 index b743672cc..000000000 --- a/senlin/conf/engine.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import cfg - -from senlin.common.i18n import _ - - -ENGINE_GROUP = cfg.OptGroup('engine') -ENGINE_OPTS = [ - cfg.IntOpt('workers', - default=1, - deprecated_name='num_engine_workers', - deprecated_group="DEFAULT", - help=_('Number of senlin-engine processes.')), - cfg.IntOpt('threads', - default=1000, - deprecated_name='scheduler_thread_pool_size', - deprecated_group="DEFAULT", - help=_('Number of senlin-engine threads.')), -] - - -def register_opts(conf): - conf.register_group(ENGINE_GROUP) - conf.register_opts(ENGINE_OPTS, group=ENGINE_GROUP) - - -def list_opts(): - return { - ENGINE_GROUP: ENGINE_OPTS, - } diff --git a/senlin/conf/health_manager.py b/senlin/conf/health_manager.py deleted file mode 100644 index f63c4c2b0..000000000 --- a/senlin/conf/health_manager.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
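# A minimal sketch of how the conf modules above are consumed, assuming only
# the register_opts()/list_opts() contract they all follow; the standalone
# ConfigOpts instance and the empty argument list are illustrative, not how
# the service itself bootstraps:
#
#     from oslo_config import cfg
#
#     from senlin.conf import engine
#
#     conf = cfg.ConfigOpts()
#     engine.register_opts(conf)   # registers the [engine] group and options
#     conf(args=[])                # parse an empty command line; defaults apply
#     assert conf.engine.workers == 1
#     assert conf.engine.threads == 1000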
-from oslo_config import cfg - -from senlin.common.i18n import _ - -HEALTH_MANAGER_GROUP = cfg.OptGroup('health_manager') -HEALTH_MANAGER_OPTS = [ - cfg.StrOpt('nova_control_exchange', default='nova', - help=_("Exchange name for nova notifications.")), - cfg.StrOpt('nova_notification_topic', default='versioned_notifications', - help=_("Topic name for nova notifications.")), - cfg.StrOpt('heat_control_exchange', default='heat', - help=_("Exchange name for heat notifications.")), - cfg.StrOpt('heat_notification_topic', default='notifications', - help=_("Topic name for heat notifications.")), - cfg.MultiStrOpt("enabled_endpoints", default=['nova', 'heat'], - help=_("Notification endpoints to enable.")), - cfg.IntOpt('workers', - default=1, - help=_('Number of senlin-health-manager processes.')), - cfg.IntOpt('threads', - default=1000, - deprecated_name='health_manager_thread_pool_size', - deprecated_group="DEFAULT", - help=_('Number of senlin-health-manager threads.')), - cfg.IntOpt('cleanup_interval', - default=900, - help=_('Seconds between running periodic cleanup tasks.')), -] - - -def register_opts(conf): - conf.register_group(HEALTH_MANAGER_GROUP) - conf.register_opts(HEALTH_MANAGER_OPTS, group=HEALTH_MANAGER_GROUP) - - -def list_opts(): - return { - HEALTH_MANAGER_GROUP: HEALTH_MANAGER_OPTS - } diff --git a/senlin/conf/notification.py b/senlin/conf/notification.py deleted file mode 100644 index da3866fd2..000000000 --- a/senlin/conf/notification.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from senlin.common.i18n import _ - -NOTIFICATION_GROUP = cfg.OptGroup( - name='notification', -) - -NOTIFICATION_OPTS = [ - cfg.IntOpt('max_message_size', default=65535, - help=_('The max size(bytes) of message can be posted to ' - 'notification queue.')), - cfg.IntOpt('ttl', default=300, - help=_('The ttl in seconds of a message posted to ' - 'notification queue.')), -] - - -def register_opts(conf): - conf.register_group(NOTIFICATION_GROUP) - conf.register_opts(NOTIFICATION_OPTS, group=NOTIFICATION_GROUP) - - -def list_opts(): - return { - NOTIFICATION_GROUP: NOTIFICATION_OPTS, - } diff --git a/senlin/conf/opts.py b/senlin/conf/opts.py deleted file mode 100644 index 27f1838bf..000000000 --- a/senlin/conf/opts.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# Copied from nova
-
-"""
-This is the single point of entry to generate the sample configuration
-file for Senlin. It collects all the necessary info from the other modules
-in this package. It is assumed that:
-
-* every other module in this package has a 'list_opts' function which
- returns a dict where
- * the keys are strings which are the group names
- * the value of each key is a list of config options for that group
-* the senlin.conf package doesn't have further packages with config options
-* this module is only used in the context of sample file generation
-"""
-
-import collections
-import importlib
-import os
-import pkgutil
-
-LIST_OPTS_FUNC_NAME = "list_opts"
-
-
-def _tupleize(dct):
- """Take the dict of options and convert to the 2-tuple format."""
- return [(key, val) for key, val in dct.items()]
-
-
-def list_opts():
- """Return a list of oslo.config options available.
-
- The purpose of this function is to allow tools like the Oslo sample config
- file generator to discover the options exposed to users by this service.
- The returned list includes all oslo.config options which may be registered
- at runtime by the service api/engine.
-
- This function is also discoverable via the 'senlin.conf' entry point
- under the 'oslo.config.opts' namespace.
-
- :returns: a list of (group_name, opts) tuples
- """
- opts = collections.defaultdict(list)
- module_names = _list_module_names()
- imported_modules = _import_modules(module_names)
- _append_config_options(imported_modules, opts)
- return _tupleize(opts)
-
-
-def _list_module_names():
- module_names = []
- package_path = os.path.dirname(os.path.abspath(__file__))
- for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]):
- if modname == "opts" or ispkg:
- continue
- else:
- module_names.append(modname)
- return module_names
-
-
-def _import_modules(module_names):
- imported_modules = []
- for modname in module_names:
- mod = importlib.import_module("senlin.conf." + modname)
- if not hasattr(mod, LIST_OPTS_FUNC_NAME):
- msg = ("The module 'senlin.conf.%s' should have a '%s' "
- "function which returns the config options." %
- (modname, LIST_OPTS_FUNC_NAME))
- raise Exception(msg)
- else:
- imported_modules.append(mod)
- return imported_modules
-
-
-def _append_config_options(imported_modules, config_options):
- for mod in imported_modules:
- configs = mod.list_opts()
- for key, val in configs.items():
- config_options[key].extend(val)
diff --git a/senlin/conf/receiver.py b/senlin/conf/receiver.py
deleted file mode 100644
index 14b4237c1..000000000
--- a/senlin/conf/receiver.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-from senlin.common.i18n import _
-
-RECEIVER_GROUP = cfg.OptGroup(
- name='receiver',
-)
-
-RECEIVER_OPTS = [
- cfg.StrOpt('host', deprecated_group='webhook',
- help=_('The address for notifying and triggering receivers. 
' - 'It is useful for case Senlin API service is running ' - 'behind a proxy.')), - cfg.PortOpt('port', default=8777, deprecated_group='webhook', - help=_('The port for notifying and triggering receivers. ' - 'It is useful for case Senlin API service is running ' - 'behind a proxy.')), - cfg.IntOpt('max_message_size', default=65535, - help=_('The max size(bytes) of message can be posted to ' - 'receiver queue.')), -] - - -def register_opts(conf): - conf.register_group(RECEIVER_GROUP) - conf.register_opts(RECEIVER_OPTS, group=RECEIVER_GROUP) - - -def list_opts(): - return { - RECEIVER_GROUP: RECEIVER_OPTS, - } diff --git a/senlin/conf/revision.py b/senlin/conf/revision.py deleted file mode 100644 index 94e919b8b..000000000 --- a/senlin/conf/revision.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import cfg - -from senlin.common.i18n import _ - -REVISION_GROUP = cfg.OptGroup('revision') -REVISION_OPTS = [ - cfg.StrOpt('senlin_api_revision', default='1.0', - help=_('Senlin API revision.')), - cfg.StrOpt('senlin_engine_revision', default='1.0', - help=_('Senlin engine revision.')) -] - - -def register_opts(conf): - conf.register_group(REVISION_GROUP) - conf.register_opts(REVISION_OPTS, group=REVISION_GROUP) - - -def list_opts(): - return { - REVISION_GROUP: REVISION_OPTS - } diff --git a/senlin/conf/zaqar.py b/senlin/conf/zaqar.py deleted file mode 100644 index 867097390..000000000 --- a/senlin/conf/zaqar.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from keystoneauth1 import loading as ksa_loading -from oslo_config import cfg - -from senlin.common.i18n import _ - -ZAQAR_GROUP = cfg.OptGroup( - name='zaqar', - title=_('Configuration options for zaqar trustee.') -) - - -def register_opts(conf): - conf.register_group(ZAQAR_GROUP) - ksa_loading.register_session_conf_options(conf, ZAQAR_GROUP) - ksa_loading.register_auth_conf_options(conf, ZAQAR_GROUP) - - -def list_opts(): - return { - ZAQAR_GROUP: (ksa_loading.get_auth_common_conf_options() + - ksa_loading.get_auth_plugin_conf_options('password')) - } diff --git a/senlin/db/__init__.py b/senlin/db/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/db/api.py b/senlin/db/api.py deleted file mode 100644 index 0cec1e244..000000000 --- a/senlin/db/api.py +++ /dev/null @@ -1,544 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Interface for database access.
-
-SQLAlchemy is currently the only supported backend.
-"""
-
-from oslo_config import cfg
-from oslo_db import api
-
-CONF = cfg.CONF
-
-
-_BACKEND_MAPPING = {'sqlalchemy': 'senlin.db.sqlalchemy.api'}
-
-IMPL = api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
-
-
-def get_engine():
- return IMPL.get_engine()
-
-
-# Clusters
-def cluster_create(context, values):
- return IMPL.cluster_create(context, values)
-
-
-def cluster_get(context, cluster_id, project_safe=True):
- return IMPL.cluster_get(context, cluster_id, project_safe=project_safe)
-
-
-def cluster_get_by_name(context, cluster_name, project_safe=True):
- return IMPL.cluster_get_by_name(context, cluster_name,
- project_safe=project_safe)
-
-
-def cluster_get_by_short_id(context, short_id, project_safe=True):
- return IMPL.cluster_get_by_short_id(context, short_id,
- project_safe=project_safe)
-
-
-def cluster_get_all(context, limit=None, marker=None, sort=None, filters=None,
- project_safe=True):
- return IMPL.cluster_get_all(context, limit=limit, marker=marker, sort=sort,
- filters=filters, project_safe=project_safe)
-
-
-def cluster_next_index(context, cluster_id):
- return IMPL.cluster_next_index(context, cluster_id)
-
-
-def cluster_count_all(context, filters=None, project_safe=True):
- return IMPL.cluster_count_all(context, filters=filters,
- project_safe=project_safe)
-
-
-def cluster_update(context, cluster_id, values):
- return IMPL.cluster_update(context, cluster_id, values)
-
-
-def cluster_delete(context, cluster_id):
- return IMPL.cluster_delete(context, cluster_id)
-
-
-# Nodes
-def node_create(context, values):
- return IMPL.node_create(context, values)
-
-
-def node_get(context, node_id, project_safe=True):
- return IMPL.node_get(context, node_id, project_safe=project_safe)
-
-
-def node_get_by_name(context, name, project_safe=True):
- return IMPL.node_get_by_name(context, name, project_safe=project_safe)
-
-
-def node_get_by_short_id(context, short_id, project_safe=True):
- return IMPL.node_get_by_short_id(context, short_id,
- project_safe=project_safe)
-
-
-def node_get_all(context, cluster_id=None, limit=None, marker=None, sort=None,
- filters=None, project_safe=True):
- return IMPL.node_get_all(context, cluster_id=cluster_id, filters=filters,
- limit=limit, marker=marker, sort=sort,
- project_safe=project_safe)
-
-
-def node_get_all_by_cluster(context, cluster_id, filters=None,
- project_safe=True):
- return IMPL.node_get_all_by_cluster(context, cluster_id, filters=filters,
- project_safe=project_safe)
-
-
-def node_ids_by_cluster(context, cluster_id, filters=None):
- return IMPL.node_ids_by_cluster(context, cluster_id, filters=filters)
-
-
-def node_count_by_cluster(context, cluster_id, **kwargs):
- return IMPL.node_count_by_cluster(context, cluster_id, **kwargs)
-
-
-def node_update(context, node_id, values):
- return IMPL.node_update(context, node_id, values)
-
-
-def node_migrate(context, node_id, to_cluster, timestamp, role=None):
- return IMPL.node_migrate(context, node_id, to_cluster, timestamp, role)
-
-
-def node_delete(context, node_id):
- return 
IMPL.node_delete(context, node_id) - - -# Locks -def cluster_lock_acquire(cluster_id, action_id, scope): - return IMPL.cluster_lock_acquire(cluster_id, action_id, scope) - - -def cluster_is_locked(cluster_id): - return IMPL.cluster_is_locked(cluster_id) - - -def cluster_lock_release(cluster_id, action_id, scope): - return IMPL.cluster_lock_release(cluster_id, action_id, scope) - - -def cluster_lock_steal(node_id, action_id): - return IMPL.cluster_lock_steal(node_id, action_id) - - -def node_lock_acquire(node_id, action_id): - return IMPL.node_lock_acquire(node_id, action_id) - - -def node_is_locked(node_id): - return IMPL.node_is_locked(node_id) - - -def node_lock_release(node_id, action_id): - return IMPL.node_lock_release(node_id, action_id) - - -def node_lock_steal(node_id, action_id): - return IMPL.node_lock_steal(node_id, action_id) - - -# Policies -def policy_create(context, values): - return IMPL.policy_create(context, values) - - -def policy_get(context, policy_id, project_safe=True): - return IMPL.policy_get(context, policy_id, project_safe=project_safe) - - -def policy_get_by_name(context, name, project_safe=True): - return IMPL.policy_get_by_name(context, name, project_safe=project_safe) - - -def policy_get_by_short_id(context, short_id, project_safe=True): - return IMPL.policy_get_by_short_id(context, short_id, - project_safe=project_safe) - - -def policy_get_all(context, limit=None, marker=None, sort=None, filters=None, - project_safe=True): - return IMPL.policy_get_all(context, limit=limit, marker=marker, sort=sort, - filters=filters, project_safe=project_safe) - - -def policy_update(context, policy_id, values): - return IMPL.policy_update(context, policy_id, values) - - -def policy_delete(context, policy_id): - return IMPL.policy_delete(context, policy_id) - - -# Cluster-Policy Associations -def cluster_policy_get(context, cluster_id, policy_id): - return IMPL.cluster_policy_get(context, cluster_id, policy_id) - - -def cluster_policy_get_all(context, cluster_id, filters=None, sort=None): - return IMPL.cluster_policy_get_all(context, cluster_id, filters=filters, - sort=sort) - - -def cluster_policy_ids_by_cluster(context, cluster_id): - return IMPL.cluster_policy_ids_by_cluster(context, cluster_id) - - -def cluster_policy_get_by_type(context, cluster_id, policy_type, filters=None): - return IMPL.cluster_policy_get_by_type(context, cluster_id, policy_type, - filters=filters) - - -def cluster_policy_get_by_name(context, cluster_id, policy_name, filters=None): - return IMPL.cluster_policy_get_by_name(context, cluster_id, policy_name, - filters=filters) - - -def cluster_policy_attach(context, cluster_id, policy_id, values): - return IMPL.cluster_policy_attach(context, cluster_id, policy_id, values) - - -def cluster_policy_detach(context, cluster_id, policy_id): - return IMPL.cluster_policy_detach(context, cluster_id, policy_id) - - -def cluster_policy_update(context, cluster_id, policy_id, values): - return IMPL.cluster_policy_update(context, cluster_id, policy_id, values) - - -# Profiles -def profile_create(context, values): - return IMPL.profile_create(context, values) - - -def profile_get(context, profile_id, project_safe=True): - return IMPL.profile_get(context, profile_id, project_safe=project_safe) - - -def profile_get_by_name(context, name, project_safe=True): - return IMPL.profile_get_by_name(context, name, project_safe=project_safe) - - -def profile_get_by_short_id(context, short_id, project_safe=True): - return IMPL.profile_get_by_short_id(context, short_id, - 
project_safe=project_safe) - - -def profile_get_all(context, limit=None, marker=None, sort=None, filters=None, - project_safe=True): - return IMPL.profile_get_all(context, limit=limit, marker=marker, - sort=sort, filters=filters, - project_safe=project_safe) - - -def profile_update(context, profile_id, values): - return IMPL.profile_update(context, profile_id, values) - - -def profile_delete(context, profile_id): - return IMPL.profile_delete(context, profile_id) - - -# Credential -def cred_create(context, values): - return IMPL.cred_create(context, values) - - -def cred_get(context, user, project): - return IMPL.cred_get(context, user, project) - - -def cred_update(context, user, project, values): - return IMPL.cred_update(context, user, project, values) - - -def cred_delete(context, user, project): - return IMPL.cred_delete(context, user, project) - - -def cred_create_update(context, values): - return IMPL.cred_create_update(context, values) - - -# Events -def event_create(context, values): - return IMPL.event_create(context, values) - - -def event_get(context, event_id, project_safe=True): - return IMPL.event_get(context, event_id, project_safe=project_safe) - - -def event_get_by_short_id(context, short_id, project_safe=True): - return IMPL.event_get_by_short_id(context, short_id, - project_safe=project_safe) - - -def event_get_all(context, limit=None, marker=None, sort=None, filters=None, - project_safe=True): - return IMPL.event_get_all(context, limit=limit, marker=marker, sort=sort, - filters=filters, project_safe=project_safe) - - -def event_count_by_cluster(context, cluster_id, project_safe=True): - return IMPL.event_count_by_cluster(context, cluster_id, - project_safe=project_safe) - - -def event_get_all_by_cluster(context, cluster_id, limit=None, marker=None, - sort=None, filters=None, project_safe=True): - return IMPL.event_get_all_by_cluster(context, cluster_id, filters=filters, - limit=limit, marker=marker, sort=sort, - project_safe=project_safe) - - -def event_prune(context, cluster_id, project_safe=True): - return IMPL.event_prune(context, cluster_id, project_safe=project_safe) - - -# Actions -def action_create(context, values): - return IMPL.action_create(context, values) - - -def action_update(context, action_id, values): - return IMPL.action_update(context, action_id, values) - - -def action_get(context, action_id, project_safe=True, refresh=False): - return IMPL.action_get(context, action_id, project_safe=project_safe, - refresh=refresh) - - -def action_list_active_scaling(context, cluster_id, project_safe=True): - return IMPL.action_list_active_scaling(context, cluster_id, - project_safe=project_safe) - - -def action_get_by_name(context, name, project_safe=True): - return IMPL.action_get_by_name(context, name, project_safe=project_safe) - - -def action_get_by_short_id(context, short_id, project_safe=True): - return IMPL.action_get_by_short_id(context, short_id, - project_safe=project_safe) - - -def action_get_all_by_owner(context, owner): - return IMPL.action_get_all_by_owner(context, owner) - - -def action_get_all_active_by_target(context, target_id, project_safe=True): - return IMPL.action_get_all_active_by_target(context, target_id, - project_safe=project_safe) - - -def action_get_all(context, filters=None, limit=None, marker=None, sort=None, - project_safe=True): - return IMPL.action_get_all(context, filters=filters, sort=sort, - limit=limit, marker=marker, - project_safe=project_safe) - - -def action_check_status(context, action_id, timestamp): - return 
IMPL.action_check_status(context, action_id, timestamp) - - -def action_delete_by_target(context, target, action=None, - action_excluded=None, status=None): - return IMPL.action_delete_by_target(context, target, action=action, - action_excluded=action_excluded, - status=status) - - -def dependency_add(context, depended, dependent): - return IMPL.dependency_add(context, depended, dependent) - - -def dependency_get_depended(context, action_id): - return IMPL.dependency_get_depended(context, action_id) - - -def dependency_get_dependents(context, action_id): - return IMPL.dependency_get_dependents(context, action_id) - - -def action_mark_succeeded(context, action_id, timestamp): - return IMPL.action_mark_succeeded(context, action_id, timestamp) - - -def action_mark_ready(context, action_id, timestamp): - return IMPL.action_mark_ready(context, action_id, timestamp) - - -def action_mark_failed(context, action_id, timestamp, reason=None): - return IMPL.action_mark_failed(context, action_id, timestamp, reason) - - -def action_mark_cancelled(context, action_id, timestamp): - return IMPL.action_mark_cancelled(context, action_id, timestamp) - - -def action_acquire(context, action_id, owner, timestamp): - return IMPL.action_acquire(context, action_id, owner, timestamp) - - -def action_acquire_random_ready(context, owner, timestamp): - return IMPL.action_acquire_random_ready(context, owner, timestamp) - - -def action_acquire_first_ready(context, owner, timestamp): - return IMPL.action_acquire_first_ready(context, owner, timestamp) - - -def action_abandon(context, action_id, values=None): - return IMPL.action_abandon(context, action_id, values) - - -def action_lock_check(context, action_id, owner=None): - """Check whether an action has been locked(by an owner).""" - return IMPL.action_lock_check(context, action_id, owner) - - -def action_signal(context, action_id, value): - """Send signal to an action via DB.""" - return IMPL.action_signal(context, action_id, value) - - -def action_signal_query(context, action_id): - """Query signal status for the specified action.""" - return IMPL.action_signal_query(context, action_id) - - -def action_delete(context, action_id): - return IMPL.action_delete(context, action_id) - - -def receiver_create(context, values): - return IMPL.receiver_create(context, values) - - -def receiver_get(context, receiver_id, project_safe=True): - return IMPL.receiver_get(context, receiver_id, project_safe=project_safe) - - -def receiver_get_by_name(context, name, project_safe=True): - return IMPL.receiver_get_by_name(context, name, project_safe=project_safe) - - -def receiver_get_by_short_id(context, short_id, project_safe=True): - return IMPL.receiver_get_by_short_id(context, short_id, - project_safe=project_safe) - - -def receiver_get_all(context, limit=None, marker=None, filters=None, sort=None, - project_safe=True): - return IMPL.receiver_get_all(context, limit=limit, marker=marker, - sort=sort, filters=filters, - project_safe=project_safe) - - -def receiver_delete(context, receiver_id): - return IMPL.receiver_delete(context, receiver_id) - - -def receiver_update(context, receiver_id, values): - return IMPL.receiver_update(context, receiver_id, values) - - -def service_create(service_id, host=None, binary=None, topic=None): - return IMPL.service_create(service_id, host=host, binary=binary, - topic=topic) - - -def service_update(service_id, values=None): - return IMPL.service_update(service_id, values=values) - - -def service_delete(service_id): - return 
IMPL.service_delete(service_id) - - -def service_get(service_id): - return IMPL.service_get(service_id) - - -def service_get_all(): - return IMPL.service_get_all() - - -def service_cleanup_all_expired(binary): - return IMPL.service_cleanup_all_expired(binary) - - -def gc_by_engine(engine_id): - return IMPL.gc_by_engine(engine_id) - - -def registry_create(context, cluster_id, check_type, interval, params, - engine_id, enabled=True): - return IMPL.registry_create(context, cluster_id, check_type, interval, - params, engine_id, enabled=enabled) - - -def registry_update(context, cluster_id, values): - return IMPL.registry_update(context, cluster_id, values) - - -def registry_delete(context, cluster_id): - return IMPL.registry_delete(context, cluster_id) - - -def registry_claim(context, engine_id): - return IMPL.registry_claim(context, engine_id) - - -def registry_get(context, cluster_id): - return IMPL.registry_get(context, cluster_id) - - -def registry_get_by_param(context, params): - return IMPL.registry_get_by_param(context, params) - - -def registry_list_ids_by_service(context, params): - return IMPL.registry_list_by_service(context, params) - - -def db_sync(db_url=None): - """Migrate the database to the most recent version.""" - return IMPL.db_sync(db_url) - - -def db_version(): - """Display the current database version.""" - return IMPL.db_version() - - -def event_purge(engine, project, granularity, age): - """Purge the event records in database.""" - return IMPL.event_purge(project, granularity, age) - - -def action_purge(engine, project, granularity, age): - """Purge the action records in database.""" - return IMPL.action_purge(project, granularity, age) diff --git a/senlin/db/sqlalchemy/__init__.py b/senlin/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/db/sqlalchemy/alembic.ini b/senlin/db/sqlalchemy/alembic.ini deleted file mode 100644 index 34be5f67a..000000000 --- a/senlin/db/sqlalchemy/alembic.ini +++ /dev/null @@ -1,105 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = %(here)s/alembic - -# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s -# Uncomment the line below if you want the files to be prepended with date and time -# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file -# for all available tokens -# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s - -# sys.path path, will be prepended to sys.path if present. -# defaults to the current working directory. -prepend_sys_path = . - -# timezone to use when rendering the date within the migration file -# as well as the filename. -# If specified, requires the python-dateutil library that can be -# installed by adding `alembic[tz]` to the pip requirements -# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; This defaults -# to alembic/versions. When using multiple version -# directories, initial revisions must be specified with --version-path. 
-# The path separator used here should be the separator specified by "version_path_separator" below.
-# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
-
-# version path separator; As mentioned above, this is the character used to split
-# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
-# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
-# Valid values for version_path_separator are:
-#
-# version_path_separator = :
-# version_path_separator = ;
-# version_path_separator = space
-version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
-
-# the output encoding used when revision files
-# are written from script.py.mako
-# output_encoding = utf-8
-
-sqlalchemy.url =
-
-
-[post_write_hooks]
-# post_write_hooks defines scripts or Python functions that are run
-# on newly generated revision scripts. See the documentation for further
-# detail and examples
-
-# format using "black" - use the console_scripts runner, against the "black" entrypoint
-# hooks = black
-# black.type = console_scripts
-# black.entrypoint = black
-# black.options = -l 79 REVISION_SCRIPT_FILENAME
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
diff --git a/senlin/db/sqlalchemy/alembic/README b/senlin/db/sqlalchemy/alembic/README
deleted file mode 100644
index 56a5348c1..000000000
--- a/senlin/db/sqlalchemy/alembic/README
+++ /dev/null
@@ -1,4 +0,0 @@
-Please use the "senlin-manage database" command for database management.
-
-Developers adding new migrations can run 'alembic revision -m "<description>"' from
-the senlin/db/sqlalchemy directory where the alembic.ini file is located.
diff --git a/senlin/db/sqlalchemy/alembic/__init__.py b/senlin/db/sqlalchemy/alembic/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/senlin/db/sqlalchemy/alembic/env.py b/senlin/db/sqlalchemy/alembic/env.py
deleted file mode 100644
index fc48500b4..000000000
--- a/senlin/db/sqlalchemy/alembic/env.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import threading
-from sqlalchemy import engine_from_config
-from sqlalchemy import pool
-
-from alembic import context
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
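# A minimal sketch of how Alembic drives this env.py, assuming the standard
# alembic Python API; the SQLite URL is illustrative, not a Senlin default:
#
#     from alembic import command
#     from alembic.config import Config
#
#     cfg = Config('senlin/db/sqlalchemy/alembic.ini')
#     cfg.set_main_option('sqlalchemy.url', 'sqlite:///senlin-test.db')
#     command.upgrade(cfg, 'head')   # lands in run_migrations_online() below
#
# Running `alembic upgrade head --sql` instead makes context.is_offline_mode()
# return True, so run_migrations_offline() emits the DDL as a script.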
-config = context.config - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = None - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline() -> None: - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option('sqlalchemy.url') - context.configure( - url=url, - target_metadata=target_metadata, - literal_binds=True, - dialect_opts={'paramstyle': 'named'}, - ) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online() -> None: - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - connectable = config.attributes.get('connection', None) - if connectable is None: - # only create Engine if we don't have a Connection from the outside - connectable = engine_from_config( - config.get_section(config.config_ini_section), - prefix='sqlalchemy.', - poolclass=pool.NullPool, - ) - with connectable.connect() as connection: - context.configure( - connection=connection, - target_metadata=target_metadata, - render_as_batch=True, - transactional_ddl=True, - transaction_per_migration=True, - ) - - with context.begin_transaction(): - context.run_migrations() - else: - context.configure( - connection=connectable, - target_metadata=target_metadata, - render_as_batch=True, - transactional_ddl=True, - transaction_per_migration=True, - ) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/senlin/db/sqlalchemy/alembic/legacy_utils.py b/senlin/db/sqlalchemy/alembic/legacy_utils.py deleted file mode 100644 index 0824b224c..000000000 --- a/senlin/db/sqlalchemy/alembic/legacy_utils.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2022 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copied from designate/storage/impl_sqlalchemy/alembic/legacy_utils.py - - -from alembic import op -from oslo_log import log as logging -import sqlalchemy as sa - -LOG = logging.getLogger(__name__) - - -def is_migration_needed(equivalent_revision): - metadata = sa.MetaData() - metadata.reflect(bind=op.get_bind()) - - if 'migrate_version' not in metadata.tables.keys(): - return True - - version_sql = sa.text("SELECT version FROM migrate_version;") - legacy_db_rev = None - try: - legacy_db_rev = op.get_bind().execute(version_sql).scalar_one_or_none() - except Exception as e: - LOG.debug("Unable to query the database for the legacy revision " - "number. 
Assuming there is no legacy migration revision " - "or the migration is running in offline mode. Error: %s", - str(e)) - - # Check if this migration was already run by the legacy sqlalchemy-migrate - # migrations. - if legacy_db_rev and int(legacy_db_rev) >= equivalent_revision: - return False - return True diff --git a/senlin/db/sqlalchemy/alembic/script.py.mako b/senlin/db/sqlalchemy/alembic/script.py.mako deleted file mode 100644 index 8fd7d5466..000000000 --- a/senlin/db/sqlalchemy/alembic/script.py.mako +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade() -> None: - ${upgrades if upgrades else "pass"} diff --git a/senlin/db/sqlalchemy/alembic/versions/004f8202c264_action_clusterid.py b/senlin/db/sqlalchemy/alembic/versions/004f8202c264_action_clusterid.py deleted file mode 100644 index 8c92a43f2..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/004f8202c264_action_clusterid.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""action clusterid - -Revision ID: 004f8202c264 -Revises: aaa7e7755feb -Create Date: 2023-03-25 14:36:31.844399 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils - -# revision identifiers, used by Alembic. -revision = '004f8202c264' -down_revision = 'aaa7e7755feb' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(15): - return - - op.add_column('action', sa.Column('cluster_id', sa.String(36), default='')) diff --git a/senlin/db/sqlalchemy/alembic/versions/0c04e812f224_user_project_length.py b/senlin/db/sqlalchemy/alembic/versions/0c04e812f224_user_project_length.py deleted file mode 100644 index 4fcc170bc..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/0c04e812f224_user_project_length.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""user project length
-
-Revision ID: 0c04e812f224
-Revises: 9dbb563afc4d
-Create Date: 2023-03-25 14:36:03.881164
-
-"""
-from alembic import op
-import sqlalchemy as sa
-
-from senlin.db.sqlalchemy.alembic import legacy_utils
-
-# revision identifiers, used by Alembic.
-revision = '0c04e812f224'
-down_revision = '9dbb563afc4d'
-branch_labels = None
-depends_on = None
-
-
-def upgrade() -> None:
-    # Check if the equivalent legacy migration has already run
-    if not legacy_utils.is_migration_needed(10):
-        return
-
-    for table_name in ['profile', 'policy', 'cluster', 'credential']:
-        with op.batch_alter_table(table_name) as batch_op:
-            batch_op.alter_column(
-                'user', type_=sa.String(64),
-                nullable=False,
-                existing_nullable=False,
-                existing_type=sa.String(32)
-            )
-
-        with op.batch_alter_table(table_name) as batch_op:
-            batch_op.alter_column(
-                'project', type_=sa.String(64),
-                nullable=False,
-                existing_nullable=False,
-                existing_type=sa.String(32)
-            )
-
-    for table_name in ['node', 'receiver', 'action', 'event']:
-        with op.batch_alter_table(table_name) as batch_op:
-            batch_op.alter_column(
-                'user', type_=sa.String(64), existing_type=sa.String(32)
-            )
-
-        with op.batch_alter_table(table_name) as batch_op:
-            batch_op.alter_column(
-                'project', type_=sa.String(64), existing_type=sa.String(32)
-            )
-
-    for table_name in ['profile', 'policy', 'cluster', 'node', 'receiver',
-                       'action']:
-        with op.batch_alter_table(table_name) as batch_op:
-            batch_op.alter_column(
-                'domain', type_=sa.String(64), existing_type=sa.String(32)
-            )
diff --git a/senlin/db/sqlalchemy/alembic/versions/3a04debb8cb1_cluster_config.py b/senlin/db/sqlalchemy/alembic/versions/3a04debb8cb1_cluster_config.py
deleted file mode 100644
index d864b25ef..000000000
--- a/senlin/db/sqlalchemy/alembic/versions/3a04debb8cb1_cluster_config.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""cluster config
-
-Revision ID: 3a04debb8cb1
-Revises: 5b7cb185e0a5
-Create Date: 2023-03-25 14:36:15.011662
-
-"""
-from alembic import op
-import sqlalchemy as sa
-
-from senlin.db.sqlalchemy.alembic import legacy_utils
-from senlin.db.sqlalchemy import types
-
-# revision identifiers, used by Alembic.
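-# NOTE: 'revision' and 'down_revision' below are what Alembic uses to
-# order migrations: each revision names the one it follows. A quick way
-# to inspect the resulting chain (a sketch, assuming an Alembic config
-# file pointing at this versions/ directory) is:
-#
-#   from alembic.config import Config
-#   from alembic.script import ScriptDirectory
-#
-#   script = ScriptDirectory.from_config(Config('alembic.ini'))
-#   for rev in script.walk_revisions():
-#       print(rev.revision, '<-', rev.down_revision)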
-revision = '3a04debb8cb1' -down_revision = '5b7cb185e0a5' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(12): - return - - op.add_column('cluster', sa.Column('config', types.Dict)) diff --git a/senlin/db/sqlalchemy/alembic/versions/569eb0b8_first_version.py b/senlin/db/sqlalchemy/alembic/versions/569eb0b8_first_version.py deleted file mode 100644 index bc74e7c26..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/569eb0b8_first_version.py +++ /dev/null @@ -1,246 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""First version - -Revision ID: 569eb0b8 -Revises: -Create Date: 2023-03-25 14:35:24.421351 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils -from senlin.db.sqlalchemy import types - -# revision identifiers, used by Alembic. -revision = '569eb0b8' -down_revision = None -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(1): - return - - metadata = sa.MetaData() - - op.create_table( - 'profile', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('name', sa.String(255)), - sa.Column('type', sa.String(255)), - sa.Column('context', types.Dict), - sa.Column('spec', types.Dict), - sa.Column('user', sa.String(32), nullable=False), - sa.Column('project', sa.String(32), nullable=False), - sa.Column('domain', sa.String(32)), - sa.Column('permission', sa.String(32)), - sa.Column('meta_data', types.Dict), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'cluster', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('profile_id', sa.String(36), sa.ForeignKey('profile.id'), - nullable=False), - sa.Column('user', sa.String(32), nullable=False), - sa.Column('project', sa.String(32), nullable=False), - sa.Column('domain', sa.String(32)), - sa.Column('parent', sa.String(36)), - sa.Column('init_at', sa.DateTime), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('min_size', sa.Integer), - sa.Column('max_size', sa.Integer), - sa.Column('desired_capacity', sa.Integer), - sa.Column('next_index', sa.Integer), - sa.Column('timeout', sa.Integer), - sa.Column('status', sa.String(255)), - sa.Column('status_reason', sa.Text), - sa.Column('meta_data', types.Dict), - sa.Column('data', types.Dict), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'node', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('name', sa.String(255)), - sa.Column('physical_id', sa.String(36)), - sa.Column('cluster_id', sa.String(36)), - sa.Column('profile_id', sa.String(36), 
sa.ForeignKey('profile.id'), - nullable=False), - sa.Column('user', sa.String(32)), - sa.Column('project', sa.String(32)), - sa.Column('domain', sa.String(32)), - sa.Column('index', sa.Integer), - sa.Column('role', sa.String(64)), - sa.Column('init_at', sa.DateTime), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('status', sa.String(255)), - sa.Column('status_reason', sa.Text), - sa.Column('meta_data', types.Dict), - sa.Column('data', types.Dict), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'cluster_lock', metadata, - sa.Column('cluster_id', sa.String(36), primary_key=True, - nullable=False), - sa.Column('action_ids', types.List), - sa.Column('semaphore', sa.Integer), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'node_lock', metadata, - sa.Column('node_id', sa.String(36), primary_key=True, nullable=False), - sa.Column('action_id', sa.String(36)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'policy', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('name', sa.String(255)), - sa.Column('type', sa.String(255)), - sa.Column('user', sa.String(32), nullable=False), - sa.Column('project', sa.String(32), nullable=False), - sa.Column('domain', sa.String(32)), - sa.Column('cooldown', sa.Integer), - sa.Column('level', sa.Integer), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('spec', types.Dict), - sa.Column('data', types.Dict), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'cluster_policy', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('cluster_id', sa.String(36), sa.ForeignKey('cluster.id'), - nullable=False), - sa.Column('policy_id', sa.String(36), sa.ForeignKey('policy.id'), - nullable=False), - sa.Column('cooldown', sa.Integer), - sa.Column('priority', sa.Integer), - sa.Column('level', sa.Integer), - sa.Column('enabled', sa.Boolean), - sa.Column('data', types.Dict), - sa.Column('last_op', sa.DateTime), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'receiver', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('name', sa.String(255)), - sa.Column('type', sa.String(255)), - sa.Column('user', sa.String(32)), - sa.Column('project', sa.String(32)), - sa.Column('domain', sa.String(32)), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('cluster_id', sa.String(36)), - sa.Column('actor', types.Dict), - sa.Column('action', sa.Text), - sa.Column('params', types.Dict), - sa.Column('channel', types.Dict), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'credential', metadata, - sa.Column('user', sa.String(32), primary_key=True, nullable=False), - sa.Column('project', sa.String(32), primary_key=True, nullable=False), - sa.Column('cred', types.Dict, nullable=False), - sa.Column('data', types.Dict), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'action', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('name', sa.String(63)), - sa.Column('context', types.Dict), - sa.Column('target', sa.String(36)), - sa.Column('action', sa.Text), - sa.Column('cause', sa.String(255)), - sa.Column('owner', sa.String(36)), - sa.Column('interval', sa.Integer), - # FIXME: Don't specify fixed precision. 
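-        # NOTE: the '24,8' strings below lean on MySQL's non-standard
-        # FLOAT(M,D) syntax; SQLAlchemy's Float otherwise expects an
-        # integer precision. A more portable sketch would be:
-        #
-        #     sa.Column('start_time', sa.Numeric(18, 6)),
-        #
-        # which is roughly what revision beffe13cf8e5 later converts
-        # these columns to.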
- sa.Column('start_time', sa.Float(precision='24,8')), - sa.Column('end_time', sa.Float(precision='24,8')), - sa.Column('timeout', sa.Integer), - sa.Column('control', sa.String(255)), - sa.Column('status', sa.String(255)), - sa.Column('status_reason', sa.Text), - sa.Column('inputs', types.Dict), - sa.Column('outputs', types.Dict), - sa.Column('depends_on', types.List), - sa.Column('depended_by', types.List), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('data', types.Dict), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'dependency', metadata, - sa.Column('id', sa.String(36), nullable=False, primary_key=True), - sa.Column('depended', sa.String(36), sa.ForeignKey('action.id'), - nullable=False), - sa.Column('dependent', sa.String(36), sa.ForeignKey('action.id'), - nullable=False), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - - op.create_table( - 'event', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('timestamp', sa.DateTime, nullable=False), - sa.Column('obj_id', sa.String(36)), - sa.Column('obj_name', sa.String(255)), - sa.Column('obj_type', sa.String(36)), - sa.Column('cluster_id', sa.String(36)), - sa.Column('level', sa.String(63)), - sa.Column('user', sa.String(32)), - sa.Column('project', sa.String(32)), - sa.Column('action', sa.String(36)), - sa.Column('status', sa.String(255)), - sa.Column('status_reason', sa.Text), - sa.Column('meta_data', types.Dict), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) diff --git a/senlin/db/sqlalchemy/alembic/versions/5b7cb185e0a5_registry_enable.py b/senlin/db/sqlalchemy/alembic/versions/5b7cb185e0a5_registry_enable.py deleted file mode 100644 index 7a0776c59..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/5b7cb185e0a5_registry_enable.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""registry enable - -Revision ID: 5b7cb185e0a5 -Revises: 0c04e812f224 -Create Date: 2023-03-25 14:36:09.547376 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils - -# revision identifiers, used by Alembic. -revision = '5b7cb185e0a5' -down_revision = '0c04e812f224' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(11): - return - - op.add_column('health_registry', sa.Column('enabled', sa.Boolean)) diff --git a/senlin/db/sqlalchemy/alembic/versions/662f8e74ac6f_event_column_name.py b/senlin/db/sqlalchemy/alembic/versions/662f8e74ac6f_event_column_name.py deleted file mode 100644 index d11edd56e..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/662f8e74ac6f_event_column_name.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""event column name - -Revision ID: 662f8e74ac6f -Revises: ab7b23c67360 -Create Date: 2023-03-25 14:35:44.367382 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils - -# revision identifiers, used by Alembic. -revision = '662f8e74ac6f' -down_revision = 'ab7b23c67360' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(5): - return - - with op.batch_alter_table('event') as batch_op: - batch_op.alter_column('obj_id', new_column_name='oid', - existing_type=sa.String(36)) - - with op.batch_alter_table('event') as batch_op: - batch_op.alter_column('obj_name', new_column_name='oname', - existing_type=sa.String(255)) - - with op.batch_alter_table('event') as batch_op: - batch_op.alter_column('obj_type', new_column_name='otype', - existing_type=sa.String(36)) diff --git a/senlin/db/sqlalchemy/alembic/versions/6f73af60_service_table.py b/senlin/db/sqlalchemy/alembic/versions/6f73af60_service_table.py deleted file mode 100644 index db21eac5d..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/6f73af60_service_table.py +++ /dev/null @@ -1,51 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Service table - -Revision ID: 6f73af60 -Revises: 569eb0b8 -Create Date: 2023-03-25 14:35:25.221356 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils - -# revision identifiers, used by Alembic. 
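-# NOTE: like every migration in this directory, upgrade() below starts
-# by calling legacy_utils.is_migration_needed() with the number of the
-# equivalent legacy sqlalchemy-migrate revision, so a database already
-# migrated under the old system is not modified twice. The core of that
-# check (see legacy_utils.py above) reduces to:
-#
-#   if legacy_db_rev and int(legacy_db_rev) >= equivalent_revision:
-#       return False  # already applied by the legacy migrations
-#   return True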
-revision = '6f73af60' -down_revision = '569eb0b8' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(2): - return - - metadata = sa.MetaData() - - op.create_table( - 'service', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('host', sa.String(255)), - sa.Column('binary', sa.String(255)), - sa.Column('topic', sa.String(255)), - sa.Column('disabled', sa.Boolean), - sa.Column('disabled_reason', sa.String(255)), - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) diff --git a/senlin/db/sqlalchemy/alembic/versions/9dbb563afc4d_node_cluster_dependents_column.py b/senlin/db/sqlalchemy/alembic/versions/9dbb563afc4d_node_cluster_dependents_column.py deleted file mode 100644 index 885a6c7ea..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/9dbb563afc4d_node_cluster_dependents_column.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""node cluster dependents column - -Revision ID: 9dbb563afc4d -Revises: 662f8e74ac6f -Create Date: 2023-03-25 14:35:52.059731 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils -from senlin.db.sqlalchemy import types - -# revision identifiers, used by Alembic. -revision = '9dbb563afc4d' -down_revision = '662f8e74ac6f' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(6): - return - - op.add_column('node', sa.Column('dependents', types.Dict())) - op.add_column('cluster', sa.Column('dependents', types.Dict())) diff --git a/senlin/db/sqlalchemy/alembic/versions/aaa7e7755feb_node_tainted.py b/senlin/db/sqlalchemy/alembic/versions/aaa7e7755feb_node_tainted.py deleted file mode 100644 index fc4f93bcf..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/aaa7e7755feb_node_tainted.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""node tainted - -Revision ID: aaa7e7755feb -Revises: beffe13cf8e5 -Create Date: 2023-03-25 14:36:27.241478 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils - -# revision identifiers, used by Alembic. 
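-# NOTE: the add_column() in upgrade() below renders on MySQL as roughly:
-#
-#   ALTER TABLE node ADD COLUMN tainted BOOL;
-#
-# No server default is set, so existing rows end up with NULL.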
-revision = 'aaa7e7755feb' -down_revision = 'beffe13cf8e5' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(14): - return - - op.add_column('node', sa.Column('tainted', sa.Boolean)) diff --git a/senlin/db/sqlalchemy/alembic/versions/ab7b23c67360_health_registry.py b/senlin/db/sqlalchemy/alembic/versions/ab7b23c67360_health_registry.py deleted file mode 100644 index 551801cdf..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/ab7b23c67360_health_registry.py +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""health registry - -Revision ID: ab7b23c67360 -Revises: c3e2bfa76dea -Create Date: 2023-03-25 14:35:33.776610 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils -from senlin.db.sqlalchemy import types - -# revision identifiers, used by Alembic. -revision = 'ab7b23c67360' -down_revision = 'c3e2bfa76dea' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(4): - return - - metadata = sa.MetaData() - - cluster = sa.Table('cluster', metadata, autoload_with=op.get_bind()) - op.create_table( - 'health_registry', metadata, - sa.Column('id', sa.String(36), primary_key=True, nullable=False), - sa.Column('cluster_id', sa.String(36), sa.ForeignKey(cluster.c.id), - nullable=False), - sa.Column('check_type', sa.String(255)), - sa.Column('interval', sa.Integer), - sa.Column('params', types.Dict), - sa.Column('engine_id', sa.String(36)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) diff --git a/senlin/db/sqlalchemy/alembic/versions/beffe13cf8e5_action_starttime_endtime_type.py b/senlin/db/sqlalchemy/alembic/versions/beffe13cf8e5_action_starttime_endtime_type.py deleted file mode 100644 index af73339a8..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/beffe13cf8e5_action_starttime_endtime_type.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""action starttime endtime type - -Revision ID: beffe13cf8e5 -Revises: 3a04debb8cb1 -Create Date: 2023-03-25 14:36:21.522415 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils - -# revision identifiers, used by Alembic. 
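-# NOTE: upgrade() below wraps each alter_column() in batch_alter_table()
-# so the type change also works on SQLite, which cannot ALTER a column
-# type in place; env.py passes render_as_batch=True when configuring the
-# migration context for the same reason. On MySQL the effect is roughly:
-#
-#   ALTER TABLE action MODIFY start_time NUMERIC(18,6);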
-revision = 'beffe13cf8e5' -down_revision = '3a04debb8cb1' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(13): - return - - with op.batch_alter_table('action') as batch_op: - batch_op.alter_column('start_time', type_=sa.Numeric('18,6'), - existing_type=sa.Numeric('24,8')) - - with op.batch_alter_table('action') as batch_op: - batch_op.alter_column('end_time', type_=sa.Numeric('18,6'), - existing_type=sa.Numeric('24,8')) diff --git a/senlin/db/sqlalchemy/alembic/versions/c3e2bfa76dea_action_tenant.py b/senlin/db/sqlalchemy/alembic/versions/c3e2bfa76dea_action_tenant.py deleted file mode 100644 index c274c21eb..000000000 --- a/senlin/db/sqlalchemy/alembic/versions/c3e2bfa76dea_action_tenant.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""action tenant - -Revision ID: c3e2bfa76dea -Revises: 6f73af60 -Create Date: 2023-03-25 14:35:26.721352 - -""" -from alembic import op -import sqlalchemy as sa - -from senlin.db.sqlalchemy.alembic import legacy_utils - -# revision identifiers, used by Alembic. -revision = 'c3e2bfa76dea' -down_revision = '6f73af60' -branch_labels = None -depends_on = None - - -def upgrade() -> None: - # Check if the equivalent legacy migration has already run - if not legacy_utils.is_migration_needed(3): - return - - op.add_column('action', sa.Column('user', sa.String(32))) - op.add_column('action', sa.Column('project', sa.String(32))) - op.add_column('action', sa.Column('domain', sa.String(32))) diff --git a/senlin/db/sqlalchemy/api.py b/senlin/db/sqlalchemy/api.py deleted file mode 100644 index 028bb7010..000000000 --- a/senlin/db/sqlalchemy/api.py +++ /dev/null @@ -1,1887 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Implementation of SQLAlchemy backend. 
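-
-All reads and writes go through the enginefacade context managers
-defined below (session_for_read / session_for_write); write paths are
-additionally wrapped with the retry_on_deadlock decorator.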
-""" - -import datetime -import sys -import threading -import time - -from oslo_config import cfg -from oslo_db import api as oslo_db_api -from oslo_db import exception as db_exc -from oslo_db import options as db_options -from oslo_db.sqlalchemy import enginefacade -from oslo_db.sqlalchemy import utils as sa_utils -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import timeutils -from osprofiler import opts as profiler -import sqlalchemy -from sqlalchemy import and_ -from sqlalchemy.orm import joinedload -from sqlalchemy.sql.expression import func - -from senlin.common import consts -from senlin.common import exception -from senlin.db.sqlalchemy import migration -from senlin.db.sqlalchemy import models -from senlin.db.sqlalchemy import utils - -osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -_CONTEXT = None -_LOCK = threading.Lock() -_MAIN_CONTEXT_MANAGER = None - -CONF.import_opt('database_retry_limit', 'senlin.conf') -CONF.import_opt('database_retry_interval', 'senlin.conf') -CONF.import_opt('database_max_retry_interval', 'senlin.conf') - -try: - CONF.import_group('profiler', 'senlin.conf') -except cfg.NoSuchGroupError: - pass - - -def initialize(): - connection = CONF['database'].connection - db_options.set_defaults( - CONF, connection=connection - ) - profiler.set_defaults(CONF, enabled=False, trace_sqlalchemy=False) - - -def _get_main_context_manager(): - global _LOCK - global _MAIN_CONTEXT_MANAGER - - with _LOCK: - if not _MAIN_CONTEXT_MANAGER: - initialize() - _MAIN_CONTEXT_MANAGER = enginefacade.transaction_context() - - return _MAIN_CONTEXT_MANAGER - - -def _get_context(): - global _CONTEXT - if _CONTEXT is None: - import threading - _CONTEXT = threading.local() - return _CONTEXT - - -def _wrap_session(sess): - if not osprofiler_sqlalchemy: - return sess - if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: - sess = osprofiler_sqlalchemy.wrap_session(sqlalchemy, sess) - return sess - - -def session_for_read(): - reader = _get_main_context_manager().reader - return _wrap_session(reader.using(_get_context())) - - -def session_for_write(): - writer = _get_main_context_manager().writer - return _wrap_session(writer.using(_get_context())) - - -def service_expired_time(): - return (timeutils.utcnow() - - datetime.timedelta(seconds=2.2 * CONF.periodic_interval)) - - -def get_engine(): - return _get_main_context_manager().writer.get_engine() - - -def get_backend(): - """The backend is this module itself.""" - return sys.modules[__name__] - - -def retry_on_deadlock(f): - return oslo_db_api.wrap_db_retry( - retry_on_deadlock=True, - max_retries=CONF.database_retry_limit, - retry_interval=CONF.database_retry_interval, - inc_retry_interval=True, - max_retry_interval=CONF.database_max_retry_interval)(f) - - -def query_by_short_id(session, context, model_query, model, short_id, - project_safe=True): - q = model_query(session) - q = q.filter(model.id.like('%s%%' % short_id)) - q = utils.filter_query_by_project(q, project_safe, context) - - if q.count() == 1: - return q.first() - elif q.count() == 0: - return None - else: - raise exception.MultipleChoices(arg=short_id) - - -def query_by_name(session, context, model_query, name, project_safe=True): - q = model_query(session) - q = q.filter_by(name=name) - q = utils.filter_query_by_project(q, project_safe, context) - - if q.count() == 1: - return q.first() - elif q.count() == 0: - return None - else: - raise 
exception.MultipleChoices(arg=name) - - -# Clusters -def cluster_model_query(session): - query = session.query(models.Cluster).options( - joinedload(models.Cluster.nodes), - joinedload(models.Cluster.profile), - joinedload(models.Cluster.policies) - ) - return query - - -@retry_on_deadlock -def cluster_create(context, values): - with session_for_write() as session: - cluster_ref = models.Cluster() - cluster_ref.update(values) - session.add(cluster_ref) - return cluster_get(context, cluster_ref.id) - - -def cluster_get(context, cluster_id, project_safe=True): - with session_for_read() as session: - cluster = cluster_model_query(session).get(cluster_id) - - if cluster is None: - return None - - return utils.check_resource_project(context, cluster, project_safe) - - -def cluster_get_by_name(context, name, project_safe=True): - with session_for_read() as session: - return query_by_name(session, context, cluster_model_query, name, - project_safe=project_safe) - - -def cluster_get_by_short_id(context, short_id, project_safe=True): - with session_for_read() as session: - return query_by_short_id(session, context, cluster_model_query, - models.Cluster, - short_id, project_safe=project_safe) - - -def _query_cluster_get_all(session, context, project_safe=True): - query = cluster_model_query(session) - query = utils.filter_query_by_project(query, project_safe, context) - - return query - - -def cluster_get_all(context, limit=None, marker=None, sort=None, filters=None, - project_safe=True): - with session_for_read() as session: - query = _query_cluster_get_all(session, context, - project_safe=project_safe) - if filters: - query = utils.exact_filter(query, models.Cluster, filters) - - keys, dirs = utils.get_sort_params(sort, consts.CLUSTER_INIT_AT) - if marker: - marker = cluster_model_query(session).get(marker) - - return sa_utils.paginate_query(query, models.Cluster, limit, keys, - marker=marker, sort_dirs=dirs).all() - - -@retry_on_deadlock -def cluster_next_index(context, cluster_id): - with session_for_write() as session: - cluster = session.query(models.Cluster).with_for_update().get( - cluster_id) - if cluster is None: - return 0 - - next_index = cluster.next_index - cluster.next_index = cluster.next_index + 1 - cluster.save(session) - return next_index - - -def cluster_count_all(context, filters=None, project_safe=True): - with session_for_read() as session: - query = _query_cluster_get_all(session, context, - project_safe=project_safe) - query = utils.exact_filter(query, models.Cluster, filters) - return query.count() - - -@retry_on_deadlock -def cluster_update(context, cluster_id, values): - with session_for_write() as session: - cluster = session.query( - models.Cluster).with_for_update().get(cluster_id) - - if not cluster: - raise exception.ResourceNotFound(type='cluster', id=cluster_id) - - cluster.update(values) - cluster.save(session) - - -@retry_on_deadlock -def cluster_delete(context, cluster_id): - with session_for_write() as session: - cluster = session.query(models.Cluster).get(cluster_id) - if cluster is None: - raise exception.ResourceNotFound(type='cluster', id=cluster_id) - - query = session.query(models.Node).filter_by(cluster_id=cluster_id) - nodes = query.all() - - if len(nodes) != 0: - for node in nodes: - session.delete(node) - - # Delete all related cluster_policies records - for cp in cluster.policies: - session.delete(cp) - - # Delete cluster - session.delete(cluster) - - -# Nodes -def node_model_query(session): - query = session.query(models.Node).options( - 
joinedload(models.Node.profile) - ) - return query - - -@retry_on_deadlock -def node_create(context, values): - # This operation is always called with cluster and node locked - with session_for_write() as session: - node = models.Node() - node.update(values) - session.add(node) - return node - - -def node_get(context, node_id, project_safe=True): - with session_for_read() as session: - node = node_model_query(session).get(node_id) - if not node: - return None - - return utils.check_resource_project(context, node, project_safe) - - -def node_get_by_name(context, name, project_safe=True): - with session_for_read() as session: - return query_by_name(session, context, node_model_query, name, - project_safe=project_safe) - - -def node_get_by_short_id(context, short_id, project_safe=True): - with session_for_read() as session: - return query_by_short_id(session, context, node_model_query, - models.Node, short_id, - project_safe=project_safe) - - -def _query_node_get_all(session, context, project_safe=True, cluster_id=None): - query = node_model_query(session) - - if cluster_id is not None: - query = query.filter_by(cluster_id=cluster_id) - - query = utils.filter_query_by_project(query, project_safe, context) - - return query - - -def node_get_all(context, cluster_id=None, limit=None, marker=None, sort=None, - filters=None, project_safe=True): - with session_for_read() as session: - query = _query_node_get_all(session, context, - project_safe=project_safe, - cluster_id=cluster_id) - - if filters: - query = utils.exact_filter(query, models.Node, filters) - - keys, dirs = utils.get_sort_params(sort, consts.NODE_INIT_AT) - if marker: - marker = node_model_query(session).get(marker) - return sa_utils.paginate_query(query, models.Node, limit, keys, - marker=marker, sort_dirs=dirs).all() - - -def node_get_all_by_cluster(context, cluster_id, filters=None, - project_safe=True): - with session_for_read() as session: - query = _query_node_get_all(session, context, cluster_id=cluster_id, - project_safe=project_safe) - if filters: - query = utils.exact_filter(query, models.Node, filters) - - return query.all() - - -def node_ids_by_cluster(context, cluster_id, filters=None): - """an internal API for getting node IDs.""" - with session_for_read() as session: - query = session.query(models.Node.id).filter_by(cluster_id=cluster_id) - if filters: - query = utils.exact_filter(query, models.Node, filters) - - return [n[0] for n in query.all()] - - -def node_count_by_cluster(context, cluster_id, **kwargs): - with session_for_read() as session: - project_safe = kwargs.pop('project_safe', True) - query = node_model_query(session) - query = query.filter_by(cluster_id=cluster_id) - query = query.filter_by(**kwargs) - query = utils.filter_query_by_project(query, project_safe, context) - - return query.count() - - -@retry_on_deadlock -def node_update(context, node_id, values): - """Update a node with new property values. - - :param node_id: ID of the node to be updated. - :param values: A dictionary of values to be updated on the node. - :raises ResourceNotFound: The specified node does not exist in database. 
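-
-    As a side effect, setting the node status to ERROR also marks the
-    owning cluster (if any) as WARNING; when 'status_reason' is given,
-    a corresponding reason is recorded on the cluster.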
- """ - with session_for_write() as session: - node = session.query(models.Node).get(node_id) - if not node: - raise exception.ResourceNotFound(type='node', id=node_id) - - node.update(values) - node.save(session) - if 'status' in values and node.cluster_id is not None: - cluster = session.query(models.Cluster).get(node.cluster_id) - if cluster is not None: - if values['status'] == 'ERROR': - cluster.status = consts.CS_WARNING - if 'status_reason' in values: - cluster.status_reason = 'Node %(node)s: %(reason)s' % { - 'node': node.name, 'reason': values['status_reason']} - cluster.save(session) - - -@retry_on_deadlock -def node_add_dependents(context, depended, dependent, dep_type=None): - """Add dependency between nodes. - - :param depended: ID of the depended dependent. - :param dependent: ID of the dependent node or profile which has - dependencies on depended node. - :param dep_type: The type of dependency. It can be 'node' indicating a - dependency between two nodes; or 'profile' indicating a - dependency from profile to node. - :raises ResourceNotFound: The specified node does not exist in database. - """ - with session_for_write() as session: - dep_node = session.query(models.Node).get(depended) - if not dep_node: - raise exception.ResourceNotFound(type='node', id=depended) - - if dep_type is None or dep_type == 'node': - key = 'nodes' - else: - key = 'profiles' - dependents = dep_node.dependents.get(key, []) - dependents.append(dependent) - dep_node.dependents.update({key: dependents}) - dep_node.save(session) - - -@retry_on_deadlock -def node_remove_dependents(context, depended, dependent, dep_type=None): - """Remove dependency between nodes. - - :param depended: ID of the depended node. - :param dependent: ID of the node or profile which has dependencies on - the depended node. - :param dep_type: The type of dependency. It can be 'node' indicating a - dependency between two nodes; or 'profile' indicating a - dependency from profile to node. - - :raises ResourceNotFound: The specified node does not exist in database. - """ - with session_for_write() as session: - dep_node = session.query(models.Node).get(depended) - if not dep_node: - raise exception.ResourceNotFound(type='node', id=depended) - - if dep_type is None or dep_type == 'node': - key = 'nodes' - else: - key = 'profiles' - - dependents = dep_node.dependents.get(key, []) - if dependent in dependents: - dependents.remove(dependent) - if len(dependents) > 0: - dep_node.dependents.update({key: dependents}) - else: - dep_node.dependents.pop(key) - dep_node.save(session) - - -@retry_on_deadlock -def node_migrate(context, node_id, to_cluster, timestamp, role=None): - with session_for_write() as session: - node = session.query(models.Node).get(node_id) - from_cluster = node.cluster_id - if from_cluster: - node.index = -1 - if to_cluster: - node.index = cluster_next_index(context, to_cluster) - node.cluster_id = to_cluster if to_cluster else '' - node.updated_at = timestamp - node.role = role - node.save(session) - return node - - -@retry_on_deadlock -def node_delete(context, node_id): - with session_for_write() as session: - node = session.query(models.Node).get(node_id) - if not node: - # Note: this is okay, because the node may have already gone - return - session.delete(node) - - -# Locks -@retry_on_deadlock -def cluster_lock_acquire(cluster_id, action_id, scope): - """Acquire lock on a cluster. - - :param cluster_id: ID of the cluster. - :param action_id: ID of the action that attempts to lock the cluster. 
- :param scope: +1 means a node-level operation lock; -1 indicates - a cluster-level lock. - :return: A list of action IDs that currently works on the cluster. - """ - with session_for_write() as session: - query = session.query(models.ClusterLock).with_for_update() - lock = query.get(cluster_id) - if lock is not None: - if scope == 1 and lock.semaphore > 0: - if action_id not in lock.action_ids: - lock.action_ids.append(str(action_id)) - lock.semaphore += 1 - lock.save(session) - else: - lock = models.ClusterLock(cluster_id=cluster_id, - action_ids=[str(action_id)], - semaphore=scope) - session.add(lock) - return lock.action_ids - - -@retry_on_deadlock -def cluster_is_locked(cluster_id): - with session_for_read() as session: - query = session.query(models.ClusterLock) - lock = query.get(cluster_id) - return lock is not None - - -@retry_on_deadlock -def _release_cluster_lock(session, lock, action_id, scope): - success = False - if (scope == -1 and lock.semaphore < 0) or lock.semaphore == 1: - if str(action_id) in lock.action_ids: - session.delete(lock) - success = True - elif str(action_id) in lock.action_ids: - if lock.semaphore == 1: - session.delete(lock) - else: - lock.action_ids.remove(str(action_id)) - lock.semaphore -= 1 - lock.save(session) - success = True - return success - - -@retry_on_deadlock -def cluster_lock_release(cluster_id, action_id, scope): - """Release lock on a cluster. - - :param cluster_id: ID of the cluster. - :param action_id: ID of the action that attempts to release the cluster. - :param scope: +1 means a node-level operation lock; -1 indicates - a cluster-level lock. - :return: True indicates successful release, False indicates failure. - """ - with session_for_write() as session: - lock = session.query( - models.ClusterLock).with_for_update().get(cluster_id) - if lock is None: - return False - - return _release_cluster_lock(session, lock, action_id, scope) - - -@retry_on_deadlock -def cluster_lock_steal(cluster_id, action_id): - with session_for_write() as session: - lock = session.query( - models.ClusterLock).with_for_update().get(cluster_id) - if lock is not None: - lock.action_ids = [action_id] - lock.semaphore = -1 - lock.save(session) - else: - lock = models.ClusterLock(cluster_id=cluster_id, - action_ids=[action_id], - semaphore=-1) - session.add(lock) - - return lock.action_ids - - -@retry_on_deadlock -def node_lock_acquire(node_id, action_id): - with session_for_write() as session: - lock = session.query( - models.NodeLock).with_for_update().get(node_id) - if lock is None: - lock = models.NodeLock(node_id=node_id, action_id=action_id) - session.add(lock) - - return lock.action_id - - -@retry_on_deadlock -def node_is_locked(node_id): - with session_for_read() as session: - query = session.query(models.NodeLock) - lock = query.get(node_id) - - return lock is not None - - -@retry_on_deadlock -def node_lock_release(node_id, action_id): - with session_for_write() as session: - success = False - lock = session.query( - models.NodeLock).with_for_update().get(node_id) - if lock is not None and lock.action_id == action_id: - session.delete(lock) - success = True - - return success - - -@retry_on_deadlock -def node_lock_steal(node_id, action_id): - with session_for_write() as session: - lock = session.query( - models.NodeLock).with_for_update().get(node_id) - if lock is not None: - lock.action_id = action_id - lock.save(session) - else: - lock = models.NodeLock(node_id=node_id, action_id=action_id) - session.add(lock) - return lock.action_id - - -# Policies 
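-# NOTE: policy queries eager-load the cluster bindings relationship, and
-# policy_delete below refuses to remove a policy that is still attached
-# to a cluster:
-#
-#   bindings = session.query(models.ClusterPolicies).filter_by(
-#       policy_id=policy_id)
-#   if bindings.count():
-#       raise exception.EResourceBusy(type='policy', id=policy_id)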
-def policy_model_query(session): - query = session.query(models.Policy).options( - joinedload(models.Policy.bindings) - ) - return query - - -@retry_on_deadlock -def policy_create(context, values): - with session_for_write() as session: - policy = models.Policy() - policy.update(values) - session.add(policy) - return policy - - -def policy_get(context, policy_id, project_safe=True): - with session_for_read() as session: - policy = policy_model_query(session) - policy = policy.filter_by(id=policy_id).first() - - if policy is None: - return None - - return utils.check_resource_project(context, policy, project_safe) - - -def policy_get_by_name(context, name, project_safe=True): - with session_for_read() as session: - return query_by_name(session, context, policy_model_query, name, - project_safe=project_safe) - - -def policy_get_by_short_id(context, short_id, project_safe=True): - with session_for_read() as session: - return query_by_short_id(session, context, policy_model_query, - models.Policy, - short_id, project_safe=project_safe) - - -def policy_get_all(context, limit=None, marker=None, sort=None, filters=None, - project_safe=True): - with session_for_read() as session: - query = policy_model_query(session) - query = utils.filter_query_by_project(query, project_safe, context) - - if filters: - query = utils.exact_filter(query, models.Policy, filters) - - keys, dirs = utils.get_sort_params(sort, consts.POLICY_CREATED_AT) - if marker: - marker = policy_model_query(session).get(marker) - return sa_utils.paginate_query(query, models.Policy, limit, keys, - marker=marker, sort_dirs=dirs).all() - - -@retry_on_deadlock -def policy_update(context, policy_id, values): - with session_for_write() as session: - policy = session.query(models.Policy).get(policy_id) - if not policy: - raise exception.ResourceNotFound(type='policy', id=policy_id) - - policy.update(values) - policy.save(session) - return policy - - -@retry_on_deadlock -def policy_delete(context, policy_id): - with session_for_write() as session: - policy = session.query(models.Policy).get(policy_id) - - if not policy: - return - - bindings = session.query(models.ClusterPolicies).filter_by( - policy_id=policy_id) - if bindings.count(): - raise exception.EResourceBusy(type='policy', id=policy_id) - session.delete(policy) - - -# Cluster-Policy Associations -def cluster_policy_model_query(session): - query = session.query(models.ClusterPolicies) - return query - - -def cluster_policy_get(context, cluster_id, policy_id): - with session_for_read() as session: - query = cluster_policy_model_query(session) - bindings = query.filter_by(cluster_id=cluster_id, - policy_id=policy_id) - return bindings.first() - - -def cluster_policy_get_all(context, cluster_id, filters=None, sort=None): - with session_for_read() as session: - query = session.query(models.ClusterPolicies) - query = query.filter_by(cluster_id=cluster_id) - - if filters is not None: - key_enabled = consts.CP_ENABLED - if key_enabled in filters: - filter_enabled = {key_enabled: filters[key_enabled]} - query = utils.exact_filter(query, models.ClusterPolicies, - filter_enabled) - key_type = consts.CP_POLICY_TYPE - key_name = consts.CP_POLICY_NAME - if key_type in filters and key_name in filters: - query = query.join(models.Policy).filter( - and_(models.Policy.type == filters[key_type], - models.Policy.name == filters[key_name])) - elif key_type in filters: - query = query.join(models.Policy).filter( - models.Policy.type == filters[key_type]) - elif key_name in filters: - query = 
query.join(models.Policy).filter(
-                models.Policy.name == filters[key_name])
-
-        keys, dirs = utils.get_sort_params(sort)
-        return sa_utils.paginate_query(query, models.ClusterPolicies, None,
-                                       keys, sort_dirs=dirs).all()
-
-
-def cluster_policy_ids_by_cluster(context, cluster_id):
-    """An internal API for getting the IDs of policies bound to a cluster."""
-    with session_for_read() as session:
-        policies = session.query(models.ClusterPolicies.policy_id).filter_by(
-            cluster_id=cluster_id).all()
-        return [p[0] for p in policies]
-
-
-def cluster_policy_get_by_type(context, cluster_id, policy_type, filters=None):
-    with session_for_read() as session:
-        query = cluster_policy_model_query(session)
-        query = query.filter_by(cluster_id=cluster_id)
-
-        key_enabled = consts.CP_ENABLED
-        if filters and key_enabled in filters:
-            filter_enabled = {key_enabled: filters[key_enabled]}
-            query = utils.exact_filter(query, models.ClusterPolicies,
-                                       filter_enabled)
-
-        query = query.join(models.Policy).filter(
-            models.Policy.type == policy_type)
-
-        return query.all()
-
-
-def cluster_policy_get_by_name(context, cluster_id, policy_name, filters=None):
-    with session_for_read() as session:
-        query = cluster_policy_model_query(session)
-        query = query.filter_by(cluster_id=cluster_id)
-
-        key_enabled = consts.CP_ENABLED
-        if filters and key_enabled in filters:
-            filter_enabled = {key_enabled: filters[key_enabled]}
-            query = utils.exact_filter(query, models.ClusterPolicies,
-                                       filter_enabled)
-
-        query = query.join(models.Policy).filter(
-            models.Policy.name == policy_name)
-
-        return query.all()
-
-
-@retry_on_deadlock
-def cluster_policy_attach(context, cluster_id, policy_id, values):
-    with session_for_write() as session:
-        binding = models.ClusterPolicies()
-        binding.cluster_id = cluster_id
-        binding.policy_id = policy_id
-        binding.update(values)
-        session.add(binding)
-    # Re-read so the foreign key relationships (cluster, policy) are loaded
-    return cluster_policy_get(context, cluster_id, policy_id)
-
-
-@retry_on_deadlock
-def cluster_policy_detach(context, cluster_id, policy_id):
-    with session_for_write() as session:
-        query = session.query(models.ClusterPolicies)
-        bindings = query.filter_by(cluster_id=cluster_id,
-                                   policy_id=policy_id).first()
-        if bindings is None:
-            return
-        session.delete(bindings)
-
-
-@retry_on_deadlock
-def cluster_policy_update(context, cluster_id, policy_id, values):
-    with session_for_write() as session:
-        query = session.query(models.ClusterPolicies)
-        binding = query.filter_by(cluster_id=cluster_id,
-                                  policy_id=policy_id).first()
-
-        if binding is None:
-            return None
-
-        binding.update(values)
-        binding.save(session)
-        return binding
-
-
-@retry_on_deadlock
-def cluster_add_dependents(context, cluster_id, profile_id):
-    """Add the profile ID of a container node to the host cluster's
-    'dependents' property.
-
-    :param cluster_id: ID of the cluster to be updated.
-    :param profile_id: Profile ID of the container node.
-    :raises ResourceNotFound: The specified cluster does not exist in database.
-    """
-
-    with session_for_write() as session:
-        cluster = session.query(models.Cluster).get(cluster_id)
-        if cluster is None:
-            raise exception.ResourceNotFound(type='cluster', id=cluster_id)
-
-        profiles = cluster.dependents.get('profiles', [])
-        profiles.append(profile_id)
-        cluster.dependents.update({'profiles': profiles})
-        cluster.save(session)
-
-
-@retry_on_deadlock
-def cluster_remove_dependents(context, cluster_id, profile_id):
-    """Remove a profile ID from the host cluster's 'dependents' property.
-
-    :param cluster_id: ID of the cluster to be updated.
- :param profile_id: Profile ID of the container node. - :raises ResourceNotFound: The specified cluster does not exist in database. - """ - - with session_for_write() as session: - cluster = session.query(models.Cluster).get(cluster_id) - if cluster is None: - raise exception.ResourceNotFound(type='cluster', id=cluster_id) - - profiles = cluster.dependents.get('profiles', []) - if profile_id in profiles: - profiles.remove(profile_id) - if len(profiles) == 0: - cluster.dependents.pop('profiles') - else: - cluster.dependents.update({'profiles': profiles}) - cluster.save(session) - - -# Profiles -def profile_model_query(session): - query = session.query(models.Profile) - return query - - -@retry_on_deadlock -def profile_create(context, values): - with session_for_write() as session: - profile = models.Profile() - profile.update(values) - session.add(profile) - return profile - - -def profile_get(context, profile_id, project_safe=True): - with session_for_read() as session: - query = profile_model_query(session) - profile = query.get(profile_id) - - if profile is None: - return None - - return utils.check_resource_project(context, profile, project_safe) - - -def profile_get_by_name(context, name, project_safe=True): - with session_for_read() as session: - return query_by_name(session, context, profile_model_query, name, - project_safe=project_safe) - - -def profile_get_by_short_id(context, short_id, project_safe=True): - with session_for_read() as session: - return query_by_short_id(session, context, profile_model_query, - models.Profile, - short_id, project_safe=project_safe) - - -def profile_get_all(context, limit=None, marker=None, sort=None, filters=None, - project_safe=True): - with session_for_read() as session: - query = profile_model_query(session) - query = utils.filter_query_by_project(query, project_safe, context) - - if filters: - query = utils.exact_filter(query, models.Profile, filters) - - keys, dirs = utils.get_sort_params(sort, consts.PROFILE_CREATED_AT) - if marker: - marker = profile_model_query(session).get(marker) - return sa_utils.paginate_query(query, models.Profile, limit, keys, - marker=marker, sort_dirs=dirs).all() - - -@retry_on_deadlock -def profile_update(context, profile_id, values): - with session_for_write() as session: - profile = session.query(models.Profile).get(profile_id) - if not profile: - raise exception.ResourceNotFound(type='profile', id=profile_id) - - profile.update(values) - profile.save(session) - return profile - - -@retry_on_deadlock -def profile_delete(context, profile_id): - with session_for_write() as session: - profile = session.query(models.Profile).get(profile_id) - if profile is None: - return - - # used by any clusters? - clusters = session.query(models.Cluster).filter_by( - profile_id=profile_id) - if clusters.count() > 0: - raise exception.EResourceBusy(type='profile', id=profile_id) - - # used by any nodes? 
- nodes = session.query(models.Node).filter_by(profile_id=profile_id) - if nodes.count() > 0: - raise exception.EResourceBusy(type='profile', id=profile_id) - session.delete(profile) - - -# Credentials -def credential_model_query(session): - query = session.query(models.Credential) - return query - - -@retry_on_deadlock -def cred_create(context, values): - with session_for_write() as session: - cred = models.Credential() - cred.update(values) - session.add(cred) - return cred - - -def cred_get(context, user, project): - with session_for_read() as session: - return credential_model_query(session).get((user, project)) - - -@retry_on_deadlock -def cred_update(context, user, project, values): - with session_for_write() as session: - cred = session.query(models.Credential).get((user, project)) - cred.update(values) - cred.save(session) - return cred - - -@retry_on_deadlock -def cred_delete(context, user, project): - with session_for_write() as session: - cred = session.query(models.Credential).get((user, project)) - if cred is None: - return None - session.delete(cred) - - -@retry_on_deadlock -def cred_create_update(context, values): - try: - return cred_create(context, values) - except db_exc.DBDuplicateEntry: - user = values.pop('user') - project = values.pop('project') - return cred_update(context, user, project, values) - - -# Events -def event_model_query(session): - query = session.query(models.Event).options( - joinedload(models.Event.cluster) - ) - return query - - -@retry_on_deadlock -def event_create(context, values): - with session_for_write() as session: - event = models.Event() - event.update(values) - session.add(event) - return event - - -@retry_on_deadlock -def event_get(context, event_id, project_safe=True): - with session_for_read() as session: - event = event_model_query(session).get(event_id) - return utils.check_resource_project(context, event, project_safe) - - -def event_get_by_short_id(context, short_id, project_safe=True): - with session_for_read() as session: - return query_by_short_id(session, context, event_model_query, - models.Event, - short_id, project_safe=project_safe) - - -def _event_filter_paginate_query(session, context, query, filters=None, - limit=None, marker=None, sort=None): - if filters: - query = utils.exact_filter(query, models.Event, filters) - - keys, dirs = utils.get_sort_params(sort, consts.EVENT_TIMESTAMP) - if marker: - marker = event_model_query(session).get(marker) - return sa_utils.paginate_query(query, models.Event, limit, keys, - marker=marker, sort_dirs=dirs).all() - - -def event_get_all(context, limit=None, marker=None, sort=None, filters=None, - project_safe=True): - with session_for_read() as session: - query = event_model_query(session) - query = utils.filter_query_by_project(query, project_safe, context) - - return _event_filter_paginate_query(session, context, query, - filters=filters, - limit=limit, marker=marker, - sort=sort) - - -def event_count_by_cluster(context, cluster_id, project_safe=True): - with session_for_read() as session: - query = event_model_query(session) - query = utils.filter_query_by_project(query, project_safe, context) - - count = query.filter_by(cluster_id=cluster_id).count() - - return count - - -def event_get_all_by_cluster(context, cluster_id, limit=None, marker=None, - sort=None, filters=None, project_safe=True): - with session_for_read() as session: - query = event_model_query(session) - query = query.filter_by(cluster_id=cluster_id) - query = utils.filter_query_by_project(query, project_safe, context) - 
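-        # Delegate filtering, sorting and pagination to the shared
-        # _event_filter_paginate_query() helper so results stay
-        # consistent with event_get_all().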
- return _event_filter_paginate_query(session, context, query, - filters=filters, - limit=limit, marker=marker, - sort=sort) - - -@retry_on_deadlock -def event_prune(context, cluster_id, project_safe=True): - with session_for_write() as session: - query = session.query(models.Event).with_for_update() - query = query.filter_by(cluster_id=cluster_id) - query = utils.filter_query_by_project(query, project_safe, context) - - return query.delete(synchronize_session='fetch') - - -@retry_on_deadlock -def event_purge(project, granularity='days', age=30): - with session_for_write() as session: - query = session.query(models.Event).with_for_update() - if project is not None: - query = query.filter(models.Event.project.in_(project)) - if granularity is not None and age is not None: - if granularity == 'days': - age = age * 86400 - elif granularity == 'hours': - age = age * 3600 - elif granularity == 'minutes': - age = age * 60 - time_line = timeutils.utcnow() - datetime.timedelta(seconds=age) - query = query.filter(models.Event.timestamp < time_line) - - return query.delete(synchronize_session='fetch') - - -# Actions -def action_model_query(session): - query = session.query(models.Action).options( - joinedload(models.Action.dep_on), - joinedload(models.Action.dep_by) - ) - return query - - -@retry_on_deadlock -def action_create(context, values): - with session_for_write() as session: - action = models.Action() - action.update(values) - session.add(action) - - return action_get(context, action.id) - - -@retry_on_deadlock -def action_update(context, action_id, values): - with session_for_write() as session: - action = session.query(models.Action).get(action_id) - if not action: - raise exception.ResourceNotFound(type='action', id=action_id) - - action.update(values) - action.save(session) - - -def action_get(context, action_id, project_safe=True, refresh=False): - with session_for_read() as session: - action = action_model_query(session).get(action_id) - if action is None: - return None - - return utils.check_resource_project(context, action, project_safe) - - -def action_list_active_scaling(context, cluster_id=None, project_safe=True): - with session_for_read() as session: - query = action_model_query(session) - query = utils.filter_query_by_project(query, project_safe, context) - - if cluster_id: - query = query.filter_by(target=cluster_id) - query = query.filter( - models.Action.status.in_( - [consts.ACTION_READY, - consts.ACTION_WAITING, - consts.ACTION_RUNNING, - consts.ACTION_WAITING_LIFECYCLE_COMPLETION])) - query = query.filter( - models.Action.action.in_(consts.CLUSTER_SCALE_ACTIONS)) - scaling_actions = query.all() - return scaling_actions - - -def action_get_by_name(context, name, project_safe=True): - with session_for_read() as session: - return query_by_name(session, context, action_model_query, name, - project_safe=project_safe) - - -def action_get_by_short_id(context, short_id, project_safe=True): - with session_for_read() as session: - return query_by_short_id(session, context, action_model_query, - models.Action, - short_id, project_safe=project_safe) - - -def action_get_all_by_owner(context, owner_id): - with session_for_read() as session: - query = action_model_query(session).filter_by(owner=owner_id) - return query.all() - - -def action_get_all_active_by_target(context, target_id, project_safe=True): - with session_for_read() as session: - query = action_model_query(session) - query = utils.filter_query_by_project(query, project_safe, context) - query = 
-
-
-def action_get_all(context, filters=None, limit=None, marker=None, sort=None,
-                   project_safe=True):
-    with session_for_read() as session:
-        query = action_model_query(session)
-        query = utils.filter_query_by_project(query, project_safe, context)
-
-        if filters:
-            query = utils.exact_filter(query, models.Action, filters)
-
-        keys, dirs = utils.get_sort_params(sort, consts.ACTION_CREATED_AT)
-        if marker:
-            marker = action_model_query(session).get(marker)
-        return sa_utils.paginate_query(query, models.Action, limit, keys,
-                                       marker=marker, sort_dirs=dirs).all()
-
-
-@retry_on_deadlock
-def action_check_status(context, action_id, timestamp):
-    with session_for_write() as session:
-        q = session.query(models.ActionDependency)
-        count = q.filter_by(dependent=action_id).count()
-        if count > 0:
-            return consts.ACTION_WAITING
-
-        action = session.query(models.Action).get(action_id)
-        if action.status == consts.ACTION_WAITING:
-            action.status = consts.ACTION_READY
-            action.status_reason = 'All depended actions completed.'
-            action.end_time = timestamp
-            action.save(session)
-
-        return action.status
-
-
-def action_dependency_model_query(session):
-    query = session.query(models.ActionDependency)
-    return query
-
-
-@retry_on_deadlock
-def dependency_get_depended(context, action_id):
-    with session_for_read() as session:
-        q = action_dependency_model_query(session).filter_by(
-            dependent=action_id)
-        return [d.depended for d in q.all()]
-
-
-@retry_on_deadlock
-def dependency_get_dependents(context, action_id):
-    with session_for_read() as session:
-        q = action_dependency_model_query(session).filter_by(
-            depended=action_id)
-        return [d.dependent for d in q.all()]
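The dependency table links a "depended" action to each "dependent" that must wait for it; dependency_add (defined next) accepts a list on exactly one side. An illustrative sketch with hypothetical action IDs and context:

    from senlin.db.sqlalchemy import api as db_api

    # Fan-in: 'D' waits until 'A', 'B' and 'C' all complete.
    db_api.dependency_add(ctx, ['A', 'B', 'C'], 'D')

    # Fan-out: 'B', 'C' and 'D' each wait on 'A'.
    db_api.dependency_add(ctx, 'A', ['B', 'C', 'D'])

    # Prints ['B', 'C', 'D']: everything still blocked on 'A'.
    print(db_api.dependency_get_dependents(ctx, 'A'))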
-
-
-@retry_on_deadlock
-def dependency_add(context, depended, dependent):
-    if isinstance(depended, list) and isinstance(dependent, list):
-        raise exception.Error(
-            'Multiple dependencies between lists not supported'
-        )
-
-    with session_for_write() as session:
-        if isinstance(depended, list):  # e.g. D depends on A,B,C
-            for d in depended:
-                r = models.ActionDependency(depended=d, dependent=dependent)
-                session.add(r)
-
-            query = session.query(models.Action).with_for_update()
-            query = query.filter_by(id=dependent)
-            query.update({'status': consts.ACTION_WAITING,
-                          'status_reason': 'Waiting for depended actions.'},
-                         synchronize_session='fetch')
-            return
-
-        # Only dependent can be a list now, convert it to a list if it
-        # is not a list
-        if not isinstance(dependent, list):  # e.g. B,C,D depend on A
-            dependents = [dependent]
-        else:
-            dependents = dependent
-
-        for d in dependents:
-            r = models.ActionDependency(depended=depended, dependent=d)
-            session.add(r)
-
-        q = session.query(models.Action).with_for_update()
-        q = q.filter(models.Action.id.in_(dependents))
-        q.update({'status': consts.ACTION_WAITING,
-                  'status_reason': 'Waiting for depended actions.'},
-                 synchronize_session='fetch')
-
-
-@retry_on_deadlock
-def action_mark_succeeded(context, action_id, timestamp):
-    with session_for_write() as session:
-        query = session.query(models.Action).filter_by(id=action_id)
-        values = {
-            'owner': None,
-            'status': consts.ACTION_SUCCEEDED,
-            'status_reason': 'Action completed successfully.',
-            'end_time': timestamp,
-        }
-        query.update(values, synchronize_session=False)
-
-        subquery = session.query(models.ActionDependency).filter_by(
-            depended=action_id)
-        subquery.delete(synchronize_session='fetch')
-
-
-@retry_on_deadlock
-def action_mark_ready(context, action_id, timestamp):
-    with session_for_write() as session:
-        query = session.query(models.Action).filter_by(id=action_id)
-        values = {
-            'owner': None,
-            'status': consts.ACTION_READY,
-            'status_reason': 'Lifecycle timeout.',
-            'end_time': timestamp,
-        }
-        query.update(values, synchronize_session=False)
-
-
-@retry_on_deadlock
-def _mark_failed(session, action_id, timestamp, reason=None):
-    # mark myself as failed
-    query = session.query(models.Action).filter_by(id=action_id)
-    values = {
-        'owner': None,
-        'status': consts.ACTION_FAILED,
-        'status_reason': (str(reason) if reason else
-                          'Action execution failed'),
-        'end_time': timestamp,
-    }
-    query.update(values, synchronize_session=False)
-    action = query.all()
-
-    query = session.query(models.ActionDependency)
-    query = query.filter_by(depended=action_id)
-    dependents = [d.dependent for d in query.all()]
-    query.delete(synchronize_session=False)
-
-    if parent_status_update_needed(action):
-        for d in dependents:
-            _mark_failed(session, d, timestamp)
-
-
-@retry_on_deadlock
-def action_mark_failed(context, action_id, timestamp, reason=None):
-    with session_for_write() as session:
-        _mark_failed(session, action_id, timestamp, reason)
-
-
-@retry_on_deadlock
-def _mark_cancelled(session, action_id, timestamp, reason=None):
-    query = session.query(models.Action).filter_by(id=action_id)
-    values = {
-        'owner': None,
-        'status': consts.ACTION_CANCELLED,
-        'status_reason': (str(reason) if reason else
-                          'Action execution cancelled'),
-        'end_time': timestamp,
-    }
-    query.update(values, synchronize_session=False)
-    action = query.all()
-
-    query = session.query(models.ActionDependency)
-    query = query.filter_by(depended=action_id)
-    dependents = [d.dependent for d in query.all()]
-    query.delete(synchronize_session=False)
-
-    if parent_status_update_needed(action):
-        for d in dependents:
-            _mark_cancelled(session, d, timestamp)
-
-
-@retry_on_deadlock
-def action_mark_cancelled(context, action_id, timestamp, reason=None):
-    with session_for_write() as session:
-        _mark_cancelled(session, action_id, timestamp, reason)
-
-
-@retry_on_deadlock
-def action_acquire(context, action_id, owner, timestamp):
-    with session_for_write() as session:
-        action = session.query(models.Action).with_for_update().get(action_id)
-        if not action:
-            return None
-
-        if action.owner and action.owner != owner:
-            return None
-
-        if action.status != consts.ACTION_READY:
-            return None
-        action.owner = owner
-        action.start_time = timestamp
-        action.status = consts.ACTION_RUNNING
-        action.status_reason = 'The action is being processed.'
-        action.save(session)
-
-        return action
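A worker claims an action by stamping its owner and flipping the status to RUNNING; acquisition returns None when the action is gone, owned elsewhere, or not READY. A sketch of the dispatch pattern (engine_id is hypothetical; timestamps are plain epoch seconds, matching the time.time() usage in dummy_gc further below):

    import time
    from senlin.db.sqlalchemy import api as db_api

    def claim_next(ctx, engine_id):
        # Picking a random READY, unowned action reduces contention
        # between engines polling the same table.
        return db_api.action_acquire_random_ready(ctx, engine_id, time.time())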
-
-
-@retry_on_deadlock
-def action_acquire_random_ready(context, owner, timestamp):
-    with session_for_write() as session:
-        action = (session.query(models.Action).
-                  filter_by(status=consts.ACTION_READY).
-                  filter_by(owner=None).
-                  order_by(func.random()).
-                  with_for_update().first())
-
-        if action:
-            action.owner = owner
-            action.start_time = timestamp
-            action.status = consts.ACTION_RUNNING
-            action.status_reason = 'The action is being processed.'
-            action.save(session)
-
-            return action
-
-
-@retry_on_deadlock
-def action_acquire_first_ready(context, owner, timestamp):
-    with session_for_write() as session:
-        action = session.query(models.Action).filter_by(
-            status=consts.ACTION_READY).filter_by(
-            owner=None).order_by(
-            consts.ACTION_CREATED_AT or func.random()).first()
-        if action:
-            return action_acquire(context, action.id, owner, timestamp)
-
-
-@retry_on_deadlock
-def action_abandon(context, action_id, values=None):
-    """Abandon an action for other workers to execute again.
-
-    This API is always called with the action locked by the current
-    worker. There is no chance the action is gone or stolen by others.
-    """
-    with session_for_write() as session:
-        action = session.query(models.Action).get(action_id)
-
-        action.owner = None
-        action.start_time = None
-        action.status = consts.ACTION_READY
-        action.status_reason = 'The action was abandoned.'
-        if values:
-            action.update(values)
-        action.save(session)
-        return action
-
-
-@retry_on_deadlock
-def action_lock_check(context, action_id, owner=None):
-    with session_for_read() as session:
-        action = action_model_query(session).get(action_id)
-        if not action:
-            raise exception.ResourceNotFound(type='action', id=action_id)
-
-        if owner:
-            return owner if owner == action.owner else action.owner
-        else:
-            return action.owner if action.owner else None
-
-
-@retry_on_deadlock
-def action_signal(context, action_id, value):
-    with session_for_write() as session:
-        action = session.query(models.Action).get(action_id)
-        if not action:
-            return
-
-        action.control = value
-        action.save(session)
-
-
-def action_signal_query(context, action_id):
-    with session_for_read() as session:
-        action = action_model_query(session).get(action_id)
-        if not action:
-            return None
-
-        return action.control
-
-
-@retry_on_deadlock
-def action_delete(context, action_id):
-    with session_for_write() as session:
-        action = session.query(models.Action).get(action_id)
-        if not action:
-            return
-        if ((action.status == consts.ACTION_WAITING) or
-                (action.status == consts.ACTION_RUNNING) or
-                (action.status == consts.ACTION_SUSPENDED)):
-            raise exception.EResourceBusy(type='action', id=action_id)
-        session.delete(action)
-
-
-@retry_on_deadlock
-def action_delete_by_target(context, target, action=None,
-                            action_excluded=None, status=None,
-                            project_safe=True):
-    if action and action_excluded:
-        LOG.warning("action and action_excluded cannot be both specified.")
-        return None
-
-    with session_for_write() as session:
-        q = session.query(models.Action).filter_by(target=target)
-        q = utils.filter_query_by_project(q, project_safe, context)
-
-        if action:
-            q = q.filter(models.Action.action.in_(action))
-        if action_excluded:
-            q = q.filter(~models.Action.action.in_(action_excluded))
-        if status:
-            q = q.filter(models.Action.status.in_(status))
-        return q.delete(synchronize_session='fetch')
-
-
-@retry_on_deadlock
-def action_purge(project, granularity='days', age=30):
-    with session_for_write() as session:
-        query = session.query(models.Action).with_for_update()
-        if project is not None:
-            query = query.filter(models.Action.project.in_(project))
-        if granularity is not None and age is not None:
-            if granularity == 'days':
-                age = age * 86400
-            elif granularity == 'hours':
-                age = age * 3600
-            elif granularity == 'minutes':
-                age = age * 60
-            time_line = timeutils.utcnow() - datetime.timedelta(seconds=age)
-            query = query.filter(models.Action.created_at < time_line)
-
-        # Get dependants to delete
-        for d in query.all():
-            q = session.query(models.ActionDependency).filter_by(depended=d.id)
-            q.delete(synchronize_session='fetch')
-        return query.delete(synchronize_session='fetch')
-
-
-# Receivers
-def receiver_model_query(session):
-    query = session.query(models.Receiver)
-    return query
-
-
-@retry_on_deadlock
-def receiver_create(context, values):
-    with session_for_write() as session:
-        receiver = models.Receiver()
-        receiver.update(values)
-        session.add(receiver)
-        return receiver
-
-
-def receiver_get(context, receiver_id, project_safe=True):
-    with session_for_read() as session:
-        receiver = receiver_model_query(session).get(receiver_id)
-        if not receiver:
-            return None
-
-        return utils.check_resource_project(context, receiver, project_safe)
-
-
-def receiver_get_all(context, limit=None, marker=None, filters=None, sort=None,
-                     project_safe=True):
-    with session_for_read() as session:
-        query = receiver_model_query(session)
-        query = utils.filter_query_by_project(query, project_safe, context)
-
-        if filters:
-            query = utils.exact_filter(query, models.Receiver, filters)
-
-        keys, dirs = utils.get_sort_params(sort, consts.RECEIVER_NAME)
-        if marker:
-            marker = receiver_model_query(session).get(marker)
-        return sa_utils.paginate_query(query, models.Receiver, limit, keys,
-                                       marker=marker, sort_dirs=dirs).all()
-
-
-def receiver_get_by_name(context, name, project_safe=True):
-    with session_for_read() as session:
-        return query_by_name(session, context, receiver_model_query, name,
-                             project_safe=project_safe)
-
-
-def receiver_get_by_short_id(context, short_id, project_safe=True):
-    with session_for_read() as session:
-        return query_by_short_id(session, context, receiver_model_query,
-                                 models.Receiver,
-                                 short_id, project_safe=project_safe)
-
-
-@retry_on_deadlock
-def receiver_delete(context, receiver_id):
-    with session_for_write() as session:
-        receiver = session.query(models.Receiver).get(receiver_id)
-        if not receiver:
-            return
-        session.delete(receiver)
-
-
-@retry_on_deadlock
-def receiver_update(context, receiver_id, values):
-    with session_for_write() as session:
-        receiver = session.query(models.Receiver).get(receiver_id)
-        if not receiver:
-            raise exception.ResourceNotFound(type='receiver', id=receiver_id)
-
-        receiver.update(values)
-        receiver.save(session)
-        return receiver
-
-
-@retry_on_deadlock
-def service_create(service_id, host=None, binary=None, topic=None,
-                   time_now=None):
-    with session_for_write() as session:
-        if not time_now:
-            time_now = timeutils.utcnow(True)
-        service = models.Service(id=service_id, host=host, binary=binary,
-                                 topic=topic, created_at=time_now,
-                                 updated_at=time_now)
-        session.add(service)
-        return service
-
-
-@retry_on_deadlock
-def service_update(service_id, values=None):
-    if values is None:
-        values = {}
-
-    with session_for_write() as session:
-        service = session.query(models.Service).get(service_id)
-        if not service:
-            return
-
-        values.update({'updated_at': timeutils.utcnow(True)})
-        service.update(values)
-        service.save(session)
-
-        return service
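An engine registers itself once and then periodically touches its row so updated_at stays fresh; service_cleanup_all_expired (below) treats stale rows as dead engines. A sketch using only the calls shown above:

    from oslo_utils import uuidutils
    from senlin.db.sqlalchemy import api as db_api

    service_id = uuidutils.generate_uuid()
    db_api.service_create(service_id, host='node-1',
                          binary='senlin-engine', topic='engine')

    # From a periodic task: bumps updated_at so the service stays "alive".
    db_api.service_update(service_id)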
-
-
-@retry_on_deadlock
-def _service_delete(session, service_id):
-    session.query(models.Service).filter_by(
-        id=service_id).delete(
-        synchronize_session='fetch'
-    )
-
-
-def service_delete(service_id):
-    with session_for_write() as session:
-        _service_delete(session, service_id)
-
-
-def service_get(service_id):
-    with session_for_read() as session:
-        return session.query(models.Service).get(service_id)
-
-
-def service_get_all():
-    with session_for_read() as session:
-        return session.query(models.Service).all()
-
-
-def service_cleanup_all_expired(binary):
-    with session_for_write() as session:
-        date_limit = service_expired_time()
-        services = session.query(models.Service).filter(
-            and_(models.Service.binary == binary,
-                 models.Service.updated_at <= date_limit)
-        )
-
-        for service in services:
-            LOG.info(
-                'Breaking locks for dead service %(binary)s '
-                '(id: %(service_id)s)',
-                {
-                    'binary': binary,
-                    'service_id': service['id'],
-                }
-            )
-            _gc_by_engine(session, service['id'])
-            LOG.info(
-                'Done breaking locks for service %(binary)s '
-                '(id: %(service_id)s)',
-                {
-                    'binary': binary,
-                    'service_id': service['id'],
-                }
-            )
-            _service_delete(session, service['id'])
-
-        return services
-
-
-@retry_on_deadlock
-def _mark_engine_failed(session, action_id, timestamp, reason=None):
-    query = session.query(models.ActionDependency)
-    # process cluster actions
-    d_query = query.filter_by(dependent=action_id)
-    dependents = [d.depended for d in d_query.all()]
-    if dependents:
-        for d in dependents:
-            _mark_engine_failed(session, d, timestamp, reason)
-    else:
-        depended = query.filter_by(depended=action_id)
-        depended.delete(synchronize_session=False)
-
-    # TODO(anyone): this will mark all depended actions' status as 'FAILED',
-    # even if an action belongs to another engine and is still running
-    # mark myself as failed
-    action = session.query(models.Action).filter_by(id=action_id).first()
-    values = {
-        'owner': None,
-        'status': consts.ACTION_FAILED,
-        'status_reason': (str(reason) if reason else
-                          'Action execution failed'),
-        'end_time': timestamp,
-    }
-    action.update(values)
-    action.save(session)
-
-
-@retry_on_deadlock
-def dummy_gc(engine_id):
-    with session_for_write() as session:
-        q_actions = session.query(models.Action).filter_by(owner=engine_id)
-        timestamp = time.time()
-        for action in q_actions.all():
-            _mark_engine_failed(session, action.id, timestamp,
-                                reason='Engine failure')
-            # Release all node locks
-            query = (session.query(models.NodeLock).
-                     filter_by(action_id=action.id))
-            query.delete(synchronize_session=False)
-
-            # Release all cluster locks
-            for clock in session.query(models.ClusterLock).all():
-                res = _release_cluster_lock(session, clock, action.id, -1)
-                if not res:
-                    _release_cluster_lock(session, clock, action.id, 1)
-
-
-@retry_on_deadlock
-def _gc_by_engine(session, engine_id):
-    # Get all actions locked by an engine
-
-    q_actions = session.query(models.Action).filter_by(owner=engine_id)
-    timestamp = time.time()
-    for a in q_actions.all():
-        # Release all node locks
-        query = session.query(models.NodeLock).filter_by(action_id=a.id)
-        query.delete(synchronize_session=False)
-
-        # Release all cluster locks
-        for cl in session.query(models.ClusterLock).all():
-            res = _release_cluster_lock(session, cl, a.id, -1)
-            if not res:
-                _release_cluster_lock(session, cl, a.id, 1)
-
-        # mark action failed and release lock
-        _mark_failed(session, a.id, timestamp, reason='Engine failure')
-
-
-def gc_by_engine(engine_id):
-    with session_for_write() as session:
-        _gc_by_engine(session, engine_id)
-
-
-# HealthRegistry
-def health_registry_model_query(session):
-    query = session.query(models.HealthRegistry)
-    return query
-
-
-@retry_on_deadlock
-def registry_create(context, cluster_id, check_type, interval, params,
-                    engine_id, enabled=True):
-    with session_for_write() as session:
-        registry = models.HealthRegistry()
-        registry.cluster_id = cluster_id
-        registry.check_type = check_type
-        registry.interval = interval
-        registry.params = params
-        registry.engine_id = engine_id
-        registry.enabled = enabled
-        session.add(registry)
-
-        return registry
-
-
-@retry_on_deadlock
-def registry_update(context, cluster_id, values):
-    with session_for_write() as session:
-        query = session.query(models.HealthRegistry).with_for_update()
-        registry = query.filter_by(cluster_id=cluster_id).first()
-        if registry:
-            registry.update(values)
-            registry.save(session)
-
-
-@retry_on_deadlock
-def registry_claim(context, engine_id):
-    with session_for_write() as session:
-        engines = session.query(models.Service).all()
-        svc_ids = [e.id for e in engines if not utils.is_service_dead(e)]
-        q_reg = session.query(models.HealthRegistry).with_for_update()
-        if svc_ids:
-            q_reg = q_reg.filter(
-                models.HealthRegistry.engine_id.notin_(svc_ids))
-
-        result = q_reg.all()
-        q_reg.update({'engine_id': engine_id}, synchronize_session=False)
-
-        return result
-
-
-@retry_on_deadlock
-def registry_delete(context, cluster_id):
-    with session_for_write() as session:
-        registry = session.query(models.HealthRegistry).filter_by(
-            cluster_id=cluster_id).first()
-        if registry is None:
-            return
-        session.delete(registry)
-
-
-def registry_get(context, cluster_id):
-    with session_for_read() as session:
-        registry = session.query(models.HealthRegistry).filter_by(
-            cluster_id=cluster_id).first()
-
-        return registry
-
-
-def registry_get_by_param(context, params):
-    with session_for_read() as session:
-        query = health_registry_model_query(session)
-        obj = utils.exact_filter(query, models.HealthRegistry, params).first()
-        return obj
-
-
-def registry_list_ids_by_service(context, engine_id):
-    with session_for_read() as session:
-        return session.query(models.HealthRegistry.cluster_id).filter_by(
-            engine_id=engine_id).all()
-
-
-# Utils
-def db_sync(db_url):
-    """Migrate the database to `version` or the most recent version."""
-    return migration.db_sync(db_url)
-
-
-def db_version():
-    """Display the current database version."""
-    return migration.db_version()
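registry_create records a cluster for periodic health checking, while registry_claim lets a live engine adopt registries whose owner no longer heartbeats. An illustrative sketch (ctx, cluster_id and engine_id are hypothetical, and the check type string is illustrative only):

    from senlin.db.sqlalchemy import api as db_api

    # Register the cluster for a health check every 60 seconds.
    db_api.registry_create(ctx, cluster_id, 'NODE_STATUS_POLLING',
                           60, {}, engine_id)

    # On startup or failover: take over registries of dead engines.
    claimed = db_api.registry_claim(ctx, engine_id)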
-
-
-def parent_status_update_needed(action):
-    """Return whether the status of the parent action needs to be updated.
-
-    Return value for update_parent_status key in action inputs
-    """
-    return (len(action) > 0 and hasattr(action[0], 'inputs') and
-            action[0].inputs.get('update_parent_status', True))
diff --git a/senlin/db/sqlalchemy/migration.py b/senlin/db/sqlalchemy/migration.py
deleted file mode 100644
index 84cea347e..000000000
--- a/senlin/db/sqlalchemy/migration.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import os
-import sys
-
-from alembic import command as alembic_command
-from alembic.config import Config
-from alembic import migration as alembic_migration
-from oslo_config import cfg
-
-from senlin.db.sqlalchemy import api as db_api
-
-CONF = cfg.CONF
-
-
-def get_alembic_config(db_url=None):
-    alembic_dir = os.path.join(os.path.dirname(__file__),
-                               os.pardir, 'db/sqlalchemy')
-    alembic_cfg = Config(os.path.join(alembic_dir, 'alembic.ini'),
-                         stdout=sys.stdout)
-    alembic_cfg.set_main_option(
-        'script_location', 'senlin.db.sqlalchemy:alembic')
-    if db_url:
-        alembic_cfg.set_main_option('sqlalchemy.url', db_url)
-    else:
-        alembic_cfg.set_main_option('sqlalchemy.url',
-                                    CONF['database'].connection)
-    return alembic_cfg
-
-
-def db_sync(db_url):
-    alembic_command.upgrade(
-        get_alembic_config(db_url), 'head'
-    )
-
-
-def db_version():
-    engine = db_api.get_engine()
-    with engine.connect() as connection:
-        m_context = alembic_migration.MigrationContext.configure(connection)
-        return m_context.get_current_revision()
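The wrapper above drives alembic programmatically, the equivalent of running "alembic upgrade head" with senlin's script location. A hedged sketch (the URL is a placeholder; db_version() additionally requires senlin's [database] connection option to be configured):

    from senlin.db.sqlalchemy import migration

    # Upgrade the schema to the latest alembic revision.
    migration.db_sync('mysql+pymysql://senlin:secret@localhost/senlin')

    # Read back the current alembic revision.
    print(migration.db_version())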
-""" - -from oslo_db.sqlalchemy import models -from oslo_utils import uuidutils -from sqlalchemy import Boolean, Column, Numeric, ForeignKey, Integer -from sqlalchemy import String, Text -from sqlalchemy.ext import declarative -from sqlalchemy.orm import backref -from sqlalchemy.orm import relationship - -from senlin.db.sqlalchemy import types - -BASE = declarative.declarative_base() -UUID4 = uuidutils.generate_uuid - - -class TimestampMixin(object): - created_at = Column(types.TZAwareDateTime) - updated_at = Column(types.TZAwareDateTime) - - -class Profile(BASE, TimestampMixin, models.ModelBase): - """Profile objects.""" - __table_args__ = {'mysql_engine': 'InnoDB'} - __tablename__ = 'profile' - - id = Column('id', String(36), primary_key=True, default=lambda: UUID4()) - name = Column(String(255)) - type = Column(String(255)) - context = Column(types.Dict) - spec = Column(types.Dict) - user = Column(String(32), nullable=False) - project = Column(String(32), nullable=False) - domain = Column(String(32)) - permission = Column(String(32)) - meta_data = Column(types.Dict) - - -class Policy(BASE, TimestampMixin, models.ModelBase): - """Policy objects.""" - __table_args__ = {'mysql_engine': 'InnoDB'} - __tablename__ = 'policy' - - id = Column('id', String(36), primary_key=True, default=lambda: UUID4()) - user = Column(String(32), nullable=False) - project = Column(String(32), nullable=False) - domain = Column(String(32)) - name = Column(String(255)) - type = Column(String(255)) - cooldown = Column(Integer) - level = Column(Integer) - spec = Column(types.Dict) - data = Column(types.Dict) - - -class Cluster(BASE, TimestampMixin, models.ModelBase): - """Cluster objects.""" - __table_args__ = {'mysql_engine': 'InnoDB'} - __tablename__ = 'cluster' - - id = Column('id', String(36), primary_key=True, default=lambda: UUID4()) - name = Column('name', String(255)) - profile_id = Column(String(36), ForeignKey('profile.id'), nullable=False) - user = Column(String(32), nullable=False) - project = Column(String(32), nullable=False) - domain = Column(String(32)) - parent = Column(String(36)) - - init_at = Column(types.TZAwareDateTime) - - min_size = Column(Integer) - max_size = Column(Integer) - desired_capacity = Column(Integer) - next_index = Column(Integer) - timeout = Column(Integer) - - status = Column(String(255)) - status_reason = Column(Text) - meta_data = Column(types.Dict) - data = Column(types.Dict) - dependents = Column(types.Dict) - config = Column(types.Dict) - - profile = relationship(Profile) - - -class Node(BASE, TimestampMixin, models.ModelBase): - """Node objects.""" - - __table_args__ = {'mysql_engine': 'InnoDB'} - __tablename__ = 'node' - - id = Column('id', String(36), primary_key=True, default=lambda: UUID4()) - name = Column(String(255)) - physical_id = Column(String(36)) - cluster_id = Column(String(36)) - profile_id = Column(String(36), ForeignKey('profile.id')) - user = Column(String(32), nullable=False) - project = Column(String(32), nullable=False) - domain = Column(String(32)) - index = Column(Integer) - role = Column(String(64)) - - init_at = Column(types.TZAwareDateTime) - - tainted = Column(Boolean) - status = Column(String(255)) - status_reason = Column(Text) - meta_data = Column(types.Dict) - data = Column(types.Dict) - dependents = Column(types.Dict) - profile = relationship(Profile, backref=backref('nodes')) - cluster = relationship(Cluster, backref=backref('nodes'), - foreign_keys=[cluster_id], - primaryjoin='Cluster.id == Node.cluster_id') - - -class ClusterLock(BASE, 
-    """Cluster locks for actions."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'cluster_lock'
-
-    cluster_id = Column(String(36), primary_key=True, nullable=False)
-    action_ids = Column(types.List)
-    semaphore = Column(Integer)
-
-
-class NodeLock(BASE, models.ModelBase):
-    """Node locks for actions."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'node_lock'
-
-    node_id = Column(String(36), primary_key=True, nullable=False)
-    action_id = Column(String(36))
-
-
-class ClusterPolicies(BASE, models.ModelBase):
-    """Association between clusters and policies."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'cluster_policy'
-
-    id = Column('id', String(36), primary_key=True, default=lambda: UUID4())
-    cluster_id = Column(String(36), ForeignKey('cluster.id'), nullable=False)
-    policy_id = Column(String(36), ForeignKey('policy.id'), nullable=False)
-    cluster = relationship(Cluster, backref=backref('policies'))
-    policy = relationship(Policy, backref=backref('bindings'))
-    enabled = Column(Boolean)
-    priority = Column(Integer)
-    data = Column(types.Dict)
-    last_op = Column(types.TZAwareDateTime)
-
-
-class HealthRegistry(BASE, models.ModelBase):
-    """Clusters registered for health management."""
-
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'health_registry'
-
-    id = Column('id', String(36), primary_key=True, default=lambda: UUID4())
-    cluster_id = Column(String(36), ForeignKey('cluster.id'), nullable=False)
-    check_type = Column('check_type', String(255))
-    interval = Column(Integer)
-    params = Column(types.Dict)
-    enabled = Column(Boolean)
-    engine_id = Column('engine_id', String(36))
-
-
-class Receiver(BASE, TimestampMixin, models.ModelBase):
-    """Receiver objects associated with clusters."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'receiver'
-
-    id = Column('id', String(36), primary_key=True, default=lambda: UUID4())
-    name = Column('name', String(255))
-    type = Column(String(255))
-    user = Column(String(32))
-    project = Column(String(32))
-    domain = Column(String(32))
-
-    cluster_id = Column(String(36), ForeignKey('cluster.id'))
-    actor = Column(types.Dict)
-    action = Column(Text)
-    params = Column(types.Dict)
-    channel = Column(types.Dict)
-
-
-class Credential(BASE, models.ModelBase):
-    """User credentials for keystone trusts etc."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'credential'
-
-    user = Column(String(32), primary_key=True, nullable=False)
-    project = Column(String(32), primary_key=True, nullable=False)
-    cred = Column(types.Dict, nullable=False)
-    data = Column(types.Dict)
-
-
-class ActionDependency(BASE, models.ModelBase):
-    """Action dependencies."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'dependency'
-
-    id = Column('id', String(36), primary_key=True, default=lambda: UUID4())
-    depended = Column('depended', String(36), ForeignKey('action.id'),
-                      nullable=False)
-    dependent = Column('dependent', String(36), ForeignKey('action.id'),
-                       nullable=False)
-
-
-class Action(BASE, TimestampMixin, models.ModelBase):
-    """Action objects."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'action'
-
-    id = Column('id', String(36), primary_key=True, default=lambda: UUID4())
-    name = Column(String(63))
-    cluster_id = Column(String(36))
-    context = Column(types.Dict)
-    target = Column(String(36))
-    action = Column(Text)
-    cause = Column(String(255))
-    owner = Column(String(36))
-    interval = Column(Integer)
-    start_time = Column(Numeric(18, 6))
-    end_time = Column(Numeric(18, 6))
-    timeout = Column(Integer)
-    status = Column(String(255))
-    status_reason = Column(Text)
-    control = Column(String(255))
-    inputs = Column(types.Dict)
-    outputs = Column(types.Dict)
-    data = Column(types.Dict)
-    user = Column(String(32))
-    project = Column(String(32))
-    domain = Column(String(32))
-
-    dep_on = relationship(
-        ActionDependency,
-        primaryjoin="Action.id == ActionDependency.dependent")
-    dep_by = relationship(
-        ActionDependency,
-        primaryjoin="Action.id == ActionDependency.depended")
-
-
-class Event(BASE, models.ModelBase):
-    """Events generated by the Senlin engine."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'event'
-
-    id = Column('id', String(36), primary_key=True, default=lambda: UUID4())
-    timestamp = Column(types.TZAwareDateTime)
-    oid = Column(String(36))
-    oname = Column(String(255))
-    otype = Column(String(36))
-    cluster_id = Column(String(36), ForeignKey('cluster.id'), nullable=True)
-    cluster = relationship(Cluster, backref=backref('events'))
-    level = Column(String(64))
-    user = Column(String(32))
-    project = Column(String(32))
-    action = Column(String(36))
-    status = Column(String(255))
-    status_reason = Column(Text)
-    meta_data = Column(types.Dict)
-
-    def as_dict(self):
-        data = super(Event, self)._as_dict()
-        ts = data['timestamp'].replace(microsecond=0).isoformat()
-        data['timestamp'] = ts
-        return data
-
-
-class Service(BASE, TimestampMixin, models.ModelBase):
-    """Senlin service engine registry."""
-    __table_args__ = {'mysql_engine': 'InnoDB'}
-    __tablename__ = 'service'
-
-    id = Column('id', String(36), primary_key=True, nullable=False)
-    host = Column(String(255))
-    binary = Column(String(255))
-    topic = Column(String(255))
-    disabled = Column(Boolean, default=False)
-    disabled_reason = Column(String(255))
diff --git a/senlin/db/sqlalchemy/types.py b/senlin/db/sqlalchemy/types.py
deleted file mode 100644
index afa6fcbab..000000000
--- a/senlin/db/sqlalchemy/types.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-import pytz
-
-from sqlalchemy.dialects import mysql
-from sqlalchemy.ext import mutable
-from sqlalchemy import types
-
-
-class MutableList(mutable.Mutable, list):
-    @classmethod
-    def coerce(cls, key, value):
-        if not isinstance(value, MutableList):
-            if isinstance(value, list):
-                return MutableList(value)
-            return mutable.Mutable.coerce(key, value)
-        else:
-            return value
-
-    def __init__(self, initval=None):
-        list.__init__(self, initval or [])
-
-    def __getitem__(self, key):
-        value = list.__getitem__(self, key)
-        for obj, key in self._parents.items():
-            value._parents[obj] = key
-        return value
-
-    def __setitem__(self, key, value):
-        list.__setitem__(self, key, value)
-        self.changed()
-
-    def __getstate__(self):
-        return list(self)
-
-    def __setstate__(self, state):
-        self[:] = state
-
-    def append(self, value):
-        list.append(self, value)
-        self.changed()
-
-    def extend(self, iterable):
-        list.extend(self, iterable)
-        self.changed()
-
-    def insert(self, index, item):
-        list.insert(self, index, item)
-        self.changed()
-
-    def __setslice__(self, i, j, other):
-        list.__setslice__(self, i, j, other)
-        self.changed()
-
-    def pop(self, index=-1):
-        item = list.pop(self, index)
-        self.changed()
-        return item
-
-    def remove(self, value):
-        list.remove(self, value)
-        self.changed()
-
-
-class Dict(types.TypeDecorator):
-    impl = types.Text
-
-    def load_dialect_impl(self, dialect):
-        if dialect.name == 'mysql':
-            return dialect.type_descriptor(mysql.LONGTEXT())
-        else:
-            return self.impl
-
-    def process_bind_param(self, value, dialect):
-        return jsonutils.dumps(value)
-
-    def process_result_value(self, value, dialect):
-        if value is None:
-            return None
-        return jsonutils.loads(value)
-
-
-class List(types.TypeDecorator):
-    impl = types.Text
-
-    def load_dialect_impl(self, dialect):
-        if dialect.name == 'mysql':
-            return dialect.type_descriptor(mysql.LONGTEXT())
-        else:
-            return self.impl
-
-    def process_bind_param(self, value, dialect):
-        return jsonutils.dumps(value)
-
-    def process_result_value(self, value, dialect):
-        if value is None:
-            return None
-        return jsonutils.loads(value)
-
-
-class TZAwareDateTime(types.TypeDecorator):
-    """A DB type that is time zone aware."""
-    impl = types.DateTime
-
-    def process_bind_param(self, value, dialect):
-        if value is None:
-            return None
-        if dialect.name == 'mysql':
-            return timeutils.normalize_time(value)
-
-        return value
-
-    def process_result_value(self, value, dialect):
-        if value is None:
-            return None
-        return value.replace(tzinfo=pytz.utc)
-
-
-mutable.MutableDict.associate_with(Dict)
-MutableList.associate_with(List)
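The two JSON types serialize to Text (LONGTEXT on MySQL), and the mutable extensions make in-place mutation of a loaded dict or list mark the row dirty. A minimal sketch on SQLite, with a hypothetical model:

    from sqlalchemy import Column, String, create_engine
    from sqlalchemy.ext import declarative
    from sqlalchemy.orm import sessionmaker

    from senlin.db.sqlalchemy import types

    Base = declarative.declarative_base()


    class Demo(Base):
        __tablename__ = 'demo'
        id = Column(String(36), primary_key=True)
        data = Column(types.Dict)   # stored as a JSON string
        tags = Column(types.List)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    session.add(Demo(id='1', data={'a': 1}, tags=['x']))
    session.commit()

    row = session.query(Demo).get('1')
    row.tags.append('y')   # tracked by MutableList, so flushed on commit
    session.commit()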
diff --git a/senlin/db/sqlalchemy/utils.py b/senlin/db/sqlalchemy/utils.py
deleted file mode 100644
index 1daa2a996..000000000
--- a/senlin/db/sqlalchemy/utils.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_utils import timeutils
-
-
-def exact_filter(query, model, filters):
-    """Applies exact match filtering to a query.
-
-    Returns the updated query. Modifies filters argument to remove
-    filters consumed.
-
-    :param query: query to apply filters to
-    :param model: model object the query applies to, for IN-style
-                  filtering
-    :param filters: dictionary of filters; values that are lists,
-                    tuples, sets, or frozensets cause an 'IN' test to
-                    be performed, while exact matching ('==' operator)
-                    is used for other values
-    """
-
-    filter_dict = {}
-    if filters is None:
-        filters = {}
-
-    for key, value in filters.items():
-        if isinstance(value, (list, tuple, set, frozenset)):
-            column_attr = getattr(model, key)
-            query = query.filter(column_attr.in_(value))
-        else:
-            filter_dict[key] = value
-
-    if filter_dict:
-        query = query.filter_by(**filter_dict)
-
-    return query
-
-
-def filter_query_by_project(q, project_safe, context):
-    """Filters a query to the context's project.
-
-    Returns the updated query. Adds a filter that limits the query to the
-    context's project for non-admin users. For admin users,
-    the query is returned unmodified.
-
-    :param q: query to apply filters to
-    :param project_safe: boolean indicating if project restriction filter
-                         should be applied
-    :param context: context of the query
-
-    """
-
-    if project_safe and not context.is_admin:
-        return q.filter_by(project=context.project_id)
-
-    return q
-
-
-def check_resource_project(context, resource, project_safe):
-    """Check if the resource's project matches the context's project.
-
-    For non-admin users, if project_safe is set and the resource's project
-    does not match the context's project, None is returned.
-    Otherwise return the resource unmodified.
-
-    :param context: context of the call
-    :param resource: resource to check
-    :param project_safe: boolean indicating if project restriction should be
-                         checked.
-    """
-
-    if resource is None:
-        return resource
-
-    if project_safe and not context.is_admin:
-        if context.project_id != resource.project:
-            return None
-
-    return resource
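exact_filter turns scalar values into equality tests and list-like values into IN tests, and filter_query_by_project then pins non-admin callers to their own project. A sketch assuming an open session and request context (both hypothetical here):

    from senlin.db.sqlalchemy import models, utils

    query = session.query(models.Cluster)
    # name must equal 'web'; status may be either of the two values.
    query = utils.exact_filter(query, models.Cluster,
                               {'name': 'web',
                                'status': ['ACTIVE', 'WARNING']})
    query = utils.filter_query_by_project(query, True, ctx)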
- """ - keys = [] - dirs = [] - if value: - for s in value.split(','): - s_key, _s, s_dir = s.partition(':') - keys.append(s_key) - s_dir = s_dir or 'asc' - nulls_appendix = 'nullsfirst' if s_dir == 'asc' else 'nullslast' - sort_dir = '-'.join([s_dir, nulls_appendix]) - dirs.append(sort_dir) - elif default_key: - # use default if specified - return [default_key, 'id'], ['asc-nullsfirst', 'asc'] - - if 'id' not in keys: - keys.append('id') - dirs.append('asc') - - return keys, dirs - - -def is_service_dead(service): - """Check if a given service is dead.""" - cfg.CONF.import_opt("periodic_interval", "senlin.conf") - max_elapse = 2 * cfg.CONF.periodic_interval - - return timeutils.is_older_than(service.updated_at, max_elapse) diff --git a/senlin/drivers/__init__.py b/senlin/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/drivers/base.py b/senlin/drivers/base.py deleted file mode 100644 index feb77e498..000000000 --- a/senlin/drivers/base.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from oslo_config import cfg - -from senlin.engine import environment - -CONF = cfg.CONF - - -class DriverBase(object): - """Base class for all drivers.""" - - def __init__(self, params): - self.conn_params = copy.deepcopy(params) - - -class SenlinDriver(object): - """Generic driver class""" - - def __init__(self, backend_name=None): - - if backend_name is None: - backend_name = cfg.CONF.cloud_backend - - backend = environment.global_env().get_driver(backend_name) - - self.compute = backend.compute - self.loadbalancing = backend.loadbalancing - self.network = backend.network - self.octavia = backend.octavia - self.orchestration = backend.orchestration - self.identity = backend.identity - self.message = backend.message - self.workflow = backend.workflow - self.block_storage = backend.block_storage - self.glance = backend.glance diff --git a/senlin/drivers/container/__init__.py b/senlin/drivers/container/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/drivers/container/docker_v1.py b/senlin/drivers/container/docker_v1.py deleted file mode 100644 index b127e84fa..000000000 --- a/senlin/drivers/container/docker_v1.py +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-import docker
-
-from senlin.drivers import sdk
-
-
-class DockerClient(object):
-    """Docker driver."""
-
-    def __init__(self, url):
-        self._dockerclient = docker.APIClient(base_url=url, version='auto')
-
-    @sdk.translate_exception
-    def container_create(self, image, name=None, command=None):
-        return self._dockerclient.create_container(name=name, image=image,
-                                                   command=command)
-
-    @sdk.translate_exception
-    def container_delete(self, container):
-        self._dockerclient.remove_container(container)
-        return True
-
-    @sdk.translate_exception
-    def restart(self, container, timeout=None):
-        params = {'timeout': timeout} if timeout else {}
-        self._dockerclient.restart(container, **params)
-
-    @sdk.translate_exception
-    def pause(self, container):
-        self._dockerclient.pause(container)
-
-    @sdk.translate_exception
-    def unpause(self, container):
-        self._dockerclient.unpause(container)
-
-    @sdk.translate_exception
-    def start(self, container):
-        self._dockerclient.start(container)
-
-    @sdk.translate_exception
-    def stop(self, container, timeout=None):
-        params = {'timeout': timeout}
-        self._dockerclient.stop(container, **params)
-
-    @sdk.translate_exception
-    def rename(self, container, name):
-        self._dockerclient.rename(container, name)
diff --git a/senlin/drivers/os/__init__.py b/senlin/drivers/os/__init__.py
deleted file mode 100644
index 3559d2f1b..000000000
--- a/senlin/drivers/os/__init__.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from senlin.drivers.os import cinder_v2
-from senlin.drivers.os import glance_v2
-from senlin.drivers.os import heat_v1
-from senlin.drivers.os import keystone_v3
-from senlin.drivers.os import lbaas
-from senlin.drivers.os import mistral_v2
-from senlin.drivers.os import neutron_v2
-from senlin.drivers.os import nova_v2
-from senlin.drivers.os import octavia_v2
-from senlin.drivers.os import zaqar_v2
-
-
-block_storage = cinder_v2.CinderClient
-compute = nova_v2.NovaClient
-glance = glance_v2.GlanceClient
-identity = keystone_v3.KeystoneClient
-loadbalancing = lbaas.LoadBalancerDriver
-message = zaqar_v2.ZaqarClient
-network = neutron_v2.NeutronClient
-octavia = octavia_v2.OctaviaClient
-orchestration = heat_v1.HeatClient
-workflow = mistral_v2.MistralClient
diff --git a/senlin/drivers/os/cinder_v2.py b/senlin/drivers/os/cinder_v2.py
deleted file mode 100644
index 56173b178..000000000
--- a/senlin/drivers/os/cinder_v2.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from senlin.drivers import base
-from senlin.drivers import sdk
-
-
-class CinderClient(base.DriverBase):
-    """Cinder V2 driver."""
-
-    def __init__(self, params):
-        super(CinderClient, self).__init__(params)
-        self.conn = sdk.create_connection(params, service_type='block-storage')
-        self.session = self.conn.session
-
-    @sdk.translate_exception
-    def volume_get(self, volume):
-        res = self.conn.block_store.get_volume(volume)
-        return res
-
-    @sdk.translate_exception
-    def volume_create(self, **attr):
-        return self.conn.block_store.create_volume(**attr)
-
-    @sdk.translate_exception
-    def volume_delete(self, volume, ignore_missing=True):
-        self.conn.block_store.delete_volume(volume,
-                                            ignore_missing=ignore_missing)
-
-    @sdk.translate_exception
-    def snapshot_create(self, **attr):
-        return self.conn.block_store.create_snapshot(**attr)
-
-    @sdk.translate_exception
-    def snapshot_delete(self, snapshot, ignore_missing=True):
-        self.conn.block_store.delete_snapshot(snapshot,
-                                              ignore_missing=ignore_missing)
-
-    @sdk.translate_exception
-    def snapshot_get(self, snapshot):
-        return self.conn.block_store.get_snapshot(snapshot)
-
-    @sdk.translate_exception
-    def volume_type_get(self, volume_type, ignore_missing=True):
-        return self.conn.block_store.find_type(volume_type,
-                                               ignore_missing=ignore_missing)
-
-    @sdk.translate_exception
-    def volume_type_create(self, **attr):
-        return self.conn.block_store.create_type(**attr)
-
-    @sdk.translate_exception
-    def volume_type_delete(self, volume_type, ignore_missing=True):
-        return self.conn.block_store.delete_type(volume_type,
-                                                 ignore_missing=ignore_missing)
diff --git a/senlin/drivers/os/glance_v2.py b/senlin/drivers/os/glance_v2.py
deleted file mode 100644
index bd31c4946..000000000
--- a/senlin/drivers/os/glance_v2.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from senlin.drivers import base
-from senlin.drivers import sdk
-
-
-class GlanceClient(base.DriverBase):
-    """Glance V2 driver."""
-
-    def __init__(self, params):
-        super(GlanceClient, self).__init__(params)
-        self.conn = sdk.create_connection(params, service_type='image')
-        self.session = self.conn.session
-
-    @sdk.translate_exception
-    def image_find(self, name_or_id, ignore_missing=True):
-        return self.conn.image.find_image(name_or_id, ignore_missing)
-
-    @sdk.translate_exception
-    def image_get(self, image):
-        return self.conn.image.get_image(image)
-
-    @sdk.translate_exception
-    def image_delete(self, name_or_id, ignore_missing=False):
-        return self.conn.image.delete_image(name_or_id, ignore_missing)
diff --git a/senlin/drivers/os/heat_v1.py b/senlin/drivers/os/heat_v1.py
deleted file mode 100644
index 18cee64b3..000000000
--- a/senlin/drivers/os/heat_v1.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-from senlin.drivers import base
-from senlin.drivers import sdk
-
-
-class HeatClient(base.DriverBase):
-    """Heat V1 driver."""
-
-    def __init__(self, params):
-        super(HeatClient, self).__init__(params)
-        self.conn = sdk.create_connection(params, service_type='orchestration')
-
-    @sdk.translate_exception
-    def stack_create(self, **params):
-        return self.conn.orchestration.create_stack(**params)
-
-    @sdk.translate_exception
-    def stack_get(self, stack_id):
-        return self.conn.orchestration.get_stack(stack_id)
-
-    @sdk.translate_exception
-    def stack_find(self, name_or_id):
-        return self.conn.orchestration.find_stack(name_or_id)
-
-    @sdk.translate_exception
-    def stack_list(self):
-        return self.conn.orchestration.stacks()
-
-    @sdk.translate_exception
-    def stack_update(self, stack_id, **params):
-        return self.conn.orchestration.update_stack(stack_id, **params)
-
-    @sdk.translate_exception
-    def stack_delete(self, stack_id, ignore_missing=True):
-        return self.conn.orchestration.delete_stack(stack_id,
-                                                    ignore_missing)
-
-    @sdk.translate_exception
-    def stack_check(self, stack_id):
-        return self.conn.orchestration.check_stack(stack_id)
-
-    @sdk.translate_exception
-    def stack_get_environment(self, stack_id):
-        return self.conn.orchestration.get_stack_environment(stack_id)
-
-    @sdk.translate_exception
-    def stack_get_files(self, stack_id):
-        return self.conn.orchestration.get_stack_files(stack_id)
-
-    @sdk.translate_exception
-    def stack_get_template(self, stack_id):
-        return self.conn.orchestration.get_stack_template(stack_id)
-
-    @sdk.translate_exception
-    def wait_for_stack(self, stack_id, status, failures=None, interval=2,
-                       timeout=None):
-        if failures is None:
-            failures = []
-
-        if timeout is None:
-            timeout = cfg.CONF.default_action_timeout
-
-        stack_obj = self.conn.orchestration.find_stack(stack_id, False)
-        if stack_obj:
-            self.conn.orchestration.wait_for_status(
-                stack_obj, status, failures, interval, timeout)
-
-    @sdk.translate_exception
-    def wait_for_stack_delete(self, stack_id, timeout=None):
-        """Wait for stack deletion to complete."""
-        if timeout is None:
-            timeout = cfg.CONF.default_action_timeout
-
-        server_obj = self.conn.orchestration.find_stack(stack_id, True)
-        if server_obj:
-            self.conn.orchestration.wait_for_delete(server_obj, wait=timeout)
-
-        return
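A typical pairing is stack_create followed by wait_for_stack, polling until the stack reaches the target status or hits a terminal failure. A hedged sketch (conn_params and template_body are placeholders; the keyword names are assumed to follow openstacksdk's create_stack):

    from senlin.drivers.os import heat_v1

    hc = heat_v1.HeatClient(conn_params)
    stack = hc.stack_create(name='demo', template=template_body)
    # Block until CREATE_COMPLETE; treat CREATE_FAILED as terminal.
    hc.wait_for_stack(stack.id, 'CREATE_COMPLETE',
                      failures=['CREATE_FAILED'], interval=5, timeout=600)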
diff --git a/senlin/drivers/os/keystone_v3.py b/senlin/drivers/os/keystone_v3.py
deleted file mode 100644
index b47a25c87..000000000
--- a/senlin/drivers/os/keystone_v3.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-
-from senlin.drivers import base
-from senlin.drivers import sdk
-
-LOG = log.getLogger(__name__)
-CONF = cfg.CONF
-
-
-class KeystoneClient(base.DriverBase):
-    """Keystone V3 driver."""
-
-    def __init__(self, params):
-        super(KeystoneClient, self).__init__(params)
-        self.conn = sdk.create_connection(params)
-        self.session = self.conn.session
-
-    @sdk.translate_exception
-    def trust_get_by_trustor(self, trustor, trustee=None, project=None):
-        """Get trust by trustor.
-
-        Note we cannot provide two or more filters to keystone due to
-        constraints in the keystone implementation. We do additional
-        filtering after the results are returned.
-
-        :param trustor: ID of the trustor;
-        :param trustee: ID of the trustee;
-        :param project: ID of the project to which the trust is scoped.
-        :returns: The trust object or None if no matching trust is found.
-        """
-        filters = {'trustor_user_id': trustor}
-
-        trusts = [t for t in self.conn.identity.trusts(**filters)]
-
-        for trust in trusts:
-            if (trustee and trust.trustee_user_id != trustee):
-                continue
-
-            if (project and trust.project_id != project):
-                continue
-
-            return trust
-
-        return None
-
-    @sdk.translate_exception
-    def trust_create(self, trustor, trustee, project, roles=None,
-                     impersonation=True):
-        """Create trust between two users.
-
-        :param trustor: ID of the user who is the trustor.
-        :param trustee: ID of the user who is the trustee.
-        :param project: Scope of the trust which is a project ID.
-        :param roles: List of roles the trustee will inherit from the trustor.
-        :param impersonation: Whether the trustee is allowed to impersonate
-                              the trustor.
-        """
-        # inherit the role of the trustor, unless CONF.trust_roles is set
-        if CONF.trust_roles:
-            role_list = [{'name': role} for role in CONF.trust_roles]
-        elif roles:
-            role_list = [{'name': role} for role in roles]
-        else:
-            role_list = []
-        params = {
-            'trustor_user_id': trustor,
-            'trustee_user_id': trustee,
-            'project_id': project,
-            'impersonation': impersonation,
-            'allow_redelegation': True,
-            'roles': role_list
-        }
-
-        result = self.conn.identity.create_trust(**params)
-
-        return result
-
-    @classmethod
-    @sdk.translate_exception
-    def get_token(cls, **creds):
-        """Get token using given credential"""
-
-        access_info = sdk.authenticate(**creds)
-        return access_info['token']
-
-    @classmethod
-    @sdk.translate_exception
-    def get_user_id(cls, **creds):
-        """Get ID of the user with given credential"""
-
-        access_info = sdk.authenticate(**creds)
-        return access_info['user_id']
-
-    @classmethod
-    def get_service_credentials(cls, **kwargs):
-        """Senlin service credential to use with Keystone.
-
-        :param kwargs: An additional keyword argument list that can be used
-                       for customizing the default settings.
- """ - - creds = { - 'auth_url': CONF.authentication.auth_url, - 'username': CONF.authentication.service_username, - 'password': CONF.authentication.service_password, - 'project_name': CONF.authentication.service_project_name, - 'user_domain_name': cfg.CONF.authentication.service_user_domain, - 'project_domain_name': - cfg.CONF.authentication.service_project_domain, - 'verify': cfg.CONF.authentication.verify_ssl, - 'interface': cfg.CONF.authentication.interface, - } - if cfg.CONF.authentication.certfile and \ - cfg.CONF.authentication.keyfile: - creds['cert'] = cfg.CONF.authentication.certfile - creds['key'] = cfg.CONF.authentication.keyfile - if cfg.CONF.authentication.cafile: - creds['cacert'] = cfg.CONF.authentication.cafile - - creds.update(**kwargs) - return creds - - @sdk.translate_exception - def validate_regions(self, regions): - """Check whether the given regions are valid. - - :param regions: A list of regions for validation. - :returns: A list of regions that are found available on keystone. - """ - region_list = self.conn.identity.regions() - known = [r['id'] for r in region_list] - - validated = [] - for r in regions: - if r in known: - validated.append(r) - else: - LOG.warning('Region %s is not found.', r) - - return validated - - @sdk.translate_exception - def get_senlin_endpoint(self): - """Get Senlin service endpoint.""" - region = cfg.CONF.default_region_name - interface = cfg.CONF.authentication.interface - base = self.conn.session.get_endpoint(service_type='clustering', - interface=interface, - region_name=region) - - return base diff --git a/senlin/drivers/os/lbaas.py b/senlin/drivers/os/lbaas.py deleted file mode 100644 index d52f8efef..000000000 --- a/senlin/drivers/os/lbaas.py +++ /dev/null @@ -1,374 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-import eventlet
-
-from oslo_context import context as oslo_context
-from oslo_log import log as logging
-
-from senlin.common import exception
-from senlin.common.i18n import _
-from senlin.drivers import base
-from senlin.drivers.os import neutron_v2 as neutronclient
-from senlin.drivers.os import octavia_v2 as octaviaclient
-from senlin.profiles import base as pb
-
-LOG = logging.getLogger(__name__)
-
-
-class LoadBalancerDriver(base.DriverBase):
-    """Load-balancing driver based on Neutron LBaaS V2 service."""
-
-    def __init__(self, params):
-        super(LoadBalancerDriver, self).__init__(params)
-        self.lb_status_timeout = 600
-        self._oc = None
-        self._nc = None
-
-    def oc(self):
-        """Octavia client
-
-        :return: octavia client
-        """
-
-        if self._oc:
-            return self._oc
-
-        self._oc = octaviaclient.OctaviaClient(self.conn_params)
-        return self._oc
-
-    def nc(self):
-        """Neutron client
-
-        :return: neutron client
-        """
-        if self._nc:
-            return self._nc
-
-        self._nc = neutronclient.NeutronClient(self.conn_params)
-        return self._nc
-
-    def _wait_for_lb_ready(self, lb_id, ignore_not_found=False):
-        """Keep waiting until the loadbalancer is ready.
-
-        This method will keep waiting until the loadbalancer resource
-        specified by lb_id becomes ready, i.e. its provisioning_status
-        is ACTIVE.
-
-        :param lb_id: ID of the load-balancer to check.
-        :param ignore_not_found: if set to True, a nonexistent loadbalancer
-            resource is also an acceptable result.
-        """
-        waited = 0
-        while waited < self.lb_status_timeout:
-            try:
-                lb = self.oc().loadbalancer_get(lb_id, ignore_missing=True)
-            except exception.InternalError as ex:
-                LOG.exception('Failed in getting loadbalancer: %s.', ex)
-                return False
-            if lb is None:
-                lb_ready = ignore_not_found
-            else:
-                lb_ready = lb.provisioning_status == 'ACTIVE'
-            if lb_ready is True:
-                return True
-
-            LOG.debug('Waiting for loadbalancer %(lb)s to become ready',
-                      {'lb': lb_id})
-
-            eventlet.sleep(10)
-            waited += 10
-
-        return False
-
-    def lb_create(self, vip, pool, cluster_name, hm=None, az=None,
-                  flavor_id=None):
-        """Create an LBaaS instance.
-
-        :param vip: A dict containing the properties for the VIP.
-        :param pool: A dict describing the pool of load-balancer members.
-        :param cluster_name: The name of the cluster, used to name the
-            loadbalancer resources created.
-        :param hm: A dict describing the health monitor.
-        :param az: The availability zone for the loadbalancer.
-        :param flavor_id: The ID of the loadbalancer flavor to use.
-        """
-        def _cleanup(msg, **kwargs):
-            LOG.error(msg)
-            self.lb_delete(**kwargs)
-            return
-
-        result = {}
-        # Create loadbalancer
-        subnet_id = None
-        network_id = None
-        try:
-            if vip.get('subnet'):
-                subnet = self.nc().subnet_get(vip['subnet'])
-                subnet_id = subnet.id
-            if vip.get('network'):
-                network = self.nc().network_get(vip['network'])
-                network_id = network.id
-        except exception.InternalError as ex:
-            msg = 'Failed in getting subnet or network: %s.' % ex
-            LOG.exception(msg)
-            return False, msg
-        try:
-            lb_name = 'senlin-lb-%s' % cluster_name
-            lb = self.oc().loadbalancer_create(
-                subnet_id, network_id, vip.get('address', None),
-                vip['admin_state_up'], name=lb_name,
-                availability_zone=az,
-                flavor_id=flavor_id)
-        except exception.InternalError as ex:
-            msg = ('Failed in creating loadbalancer: %s.'
-                   % str(ex))
-            LOG.exception(msg)
-            return False, msg
-        result['loadbalancer'] = lb.id
-        result['vip_address'] = lb.vip_address
-
-        res = self._wait_for_lb_ready(lb.id)
-        if res is False:
-            msg = 'Failed in creating loadbalancer (%s).'
% lb.id - del result['vip_address'] - _cleanup(msg, **result) - return False, msg - - # Create listener - try: - listener_name = 'senlin-listener-%s' % cluster_name - listener = self.oc().listener_create(lb.id, vip['protocol'], - vip['protocol_port'], - vip.get('connection_limit', - None), - vip['admin_state_up'], - name=listener_name) - except exception.InternalError as ex: - msg = 'Failed in creating lb listener: %s.' % str(ex) - LOG.exception(msg) - return False, msg - result['listener'] = listener.id - res = self._wait_for_lb_ready(lb.id) - if res is False: - msg = 'Failed in creating listener (%s).' % listener.id - del result['vip_address'] - _cleanup(msg, **result) - return res, msg - - # Create pool - try: - pool_name = 'senlin-pool-%s' % cluster_name - pool = self.oc().pool_create(pool['lb_method'], listener.id, - pool['protocol'], - pool['session_persistence'], - pool['admin_state_up'], - name=pool_name) - except exception.InternalError as ex: - msg = 'Failed in creating lb pool: %s.' % str(ex) - LOG.exception(msg) - return False, msg - result['pool'] = pool.id - res = self._wait_for_lb_ready(lb.id) - if res is False: - msg = 'Failed in creating pool (%s).' % pool.id - del result['vip_address'] - _cleanup(msg, **result) - return res, msg - - if not hm: - return True, result - - # Create health monitor - try: - health_monitor = self.oc().healthmonitor_create( - hm['type'], hm['delay'], hm['timeout'], hm['max_retries'], - pool.id, hm['admin_state_up'], hm['http_method'], - hm['url_path'], hm['expected_codes']) - except exception.InternalError as ex: - msg = ('Failed in creating lb health monitor: %s.' - % str(ex)) - LOG.exception(msg) - return False, msg - result['healthmonitor'] = health_monitor.id - res = self._wait_for_lb_ready(lb.id) - if res is False: - msg = 'Failed in creating health monitor (%s).' % health_monitor.id - del result['vip_address'] - _cleanup(msg, **result) - return res, msg - - return True, result - - def lb_find(self, name_or_id, ignore_missing=False, - show_deleted=False): - return self.oc().loadbalancer_get(name_or_id, ignore_missing, - show_deleted) - - def lb_delete(self, **kwargs): - """Delete a Neutron lbaas instance - - The following Neutron lbaas resources will be deleted in order: - 1)healthmonitor; 2)pool; 3)listener; 4)loadbalancer. - """ - lb_id = kwargs.pop('loadbalancer') - - lb = self.lb_find(lb_id, ignore_missing=True) - if lb is None: - LOG.debug('Loadbalancer (%s) is not existing.', lb_id) - return True, _('LB deletion succeeded') - - healthmonitor_id = kwargs.pop('healthmonitor', None) - if healthmonitor_id: - try: - self.oc().healthmonitor_delete(healthmonitor_id) - except exception.InternalError as ex: - msg = ('Failed in deleting healthmonitor: %s.' - % str(ex)) - LOG.exception(msg) - return False, msg - res = self._wait_for_lb_ready(lb_id) - if res is False: - msg = ('Failed in deleting healthmonitor ' - '(%s).') % healthmonitor_id - return False, msg - - pool_id = kwargs.pop('pool', None) - if pool_id: - try: - self.oc().pool_delete(pool_id) - except exception.InternalError as ex: - msg = ('Failed in deleting lb pool: %s.' - % str(ex)) - LOG.exception(msg) - return False, msg - res = self._wait_for_lb_ready(lb_id) - if res is False: - msg = 'Failed in deleting pool (%s).' % pool_id - return False, msg - - listener_id = kwargs.pop('listener', None) - if listener_id: - try: - self.oc().listener_delete(listener_id) - except exception.InternalError as ex: - msg = ('Failed in deleting listener: %s.' 
-                       % str(ex))
-                LOG.exception(msg)
-                return False, msg
-            res = self._wait_for_lb_ready(lb_id)
-            if res is False:
-                msg = 'Failed in deleting listener (%s).' % listener_id
-                return False, msg
-
-        self.oc().loadbalancer_delete(lb_id)
-        res = self._wait_for_lb_ready(lb_id, ignore_not_found=True)
-        if res is False:
-            msg = 'Failed in deleting loadbalancer (%s).' % lb_id
-            return False, msg
-
-        return True, _('LB deletion succeeded')
-
-    def member_add(self, node, lb_id, pool_id, port, subnet):
-        """Add a member to a Neutron lbaas pool.
-
-        :param node: A node object to be added to the specified pool.
-        :param lb_id: The ID of the loadbalancer.
-        :param pool_id: The ID of the pool for receiving the node.
-        :param port: The port for the new LB member to be created.
-        :param subnet: The subnet to be used by the new LB member.
-        :returns: The ID of the new LB member or None if errors occurred.
-        """
-        try:
-            subnet_obj = self.nc().subnet_get(subnet)
-            net_id = subnet_obj.network_id
-            net = self.nc().network_get(net_id)
-        except exception.InternalError as ex:
-            resource = 'subnet' if subnet in ex.message else 'network'
-            LOG.exception('Failed in getting %(resource)s: %(msg)s.',
-                          {'resource': resource, 'msg': ex})
-            return None
-        net_name = net.name
-
-        ctx = oslo_context.get_current()
-        prof = pb.Profile.load(ctx,
-                               profile_id=node.profile_id,
-                               project_safe=False)
-        node_detail = prof.do_get_details(node)
-        addresses = node_detail.get('addresses')
-        node_name = node_detail.get('name')
-        if net_name not in addresses:
-            LOG.error('Node is not in subnet %(subnet)s', {'subnet': subnet})
-            return None
-
-        # Use the first IP address that matches the subnet's ip_version
-        # if more than one is found in the target network.
-        address = None
-        for ip in addresses[net_name]:
-            if ip['version'] == subnet_obj.ip_version:
-                address = ip['addr']
-                break
-        if not address:
-            LOG.error("Node has no address matching the ip version (%s) of "
-                      "subnet (%s).", subnet_obj.ip_version, subnet)
-            return None
-        try:
-            # FIXME(Yanyan Hu): Currently, the Neutron lbaasv2 service cannot
-            # handle concurrent lb member operations well: a new member
-            # creation or deletion request will fail directly rather than
-            # being queued when another operation is still in progress. As a
-            # workaround, the loadbalancer status is checked before the lb
-            # member creation request is sent out. If the loadbalancer is
-            # still not ready when the wait times out, an exception is raised
-            # to fail member_add.
-            res = self._wait_for_lb_ready(lb_id)
-            if not res:
-                msg = 'Loadbalancer %s is not ready.' % lb_id
-                raise exception.Error(msg)
-            member = self.oc().pool_member_create(
-                node_name, pool_id, address, port, subnet_obj.id)
-        except (exception.InternalError, exception.Error) as ex:
-            LOG.exception('Failed in creating lb pool member: %s.', ex)
-            return None
-        res = self._wait_for_lb_ready(lb_id)
-        if res is False:
-            LOG.error('Failed in creating pool member (%s).', member.id)
-            return None
-
-        return member.id
-
-    def member_remove(self, lb_id, pool_id, member_id):
-        """Delete a member from a Neutron lbaas pool.
-
-        :param lb_id: The ID of the loadbalancer the operation is targeted at.
-        :param pool_id: The ID of the pool from which the member is deleted.
-        :param member_id: The ID of the LB member.
-        :returns: True if the operation succeeded, or None if errors occurred.
- """ - try: - # FIXME(Yanyan Hu): Currently, Neutron lbaasv2 service can not - # handle concurrent lb member operations well: new member creation - # deletion request will directly fail rather than being lined up - # when another operation is still in progress. In this workaround, - # loadbalancer status will be checked before deleting lb member - # request is sent out. If loadbalancer keeps unready till waiting - # timeout, exception will be raised to fail member_remove. - res = self._wait_for_lb_ready(lb_id, ignore_not_found=True) - # res = self._wait_for_lb_ready(lb_id) - # if not res: - # msg = 'Loadbalancer %s is not ready.' % lb_id - # raise exception.Error(msg) - self.oc().pool_member_delete(pool_id, member_id) - except (exception.InternalError, exception.Error) as ex: - LOG.exception('Failed in removing member %(m)s from pool %(p)s: ' - '%(ex)s', {'m': member_id, 'p': pool_id, 'ex': ex}) - return None - res = self._wait_for_lb_ready(lb_id, ignore_not_found=True) - if res is False: - LOG.error('Failed in deleting pool member (%s).', member_id) - return None - - return True diff --git a/senlin/drivers/os/mistral_v2.py b/senlin/drivers/os/mistral_v2.py deleted file mode 100644 index 56b67f7f2..000000000 --- a/senlin/drivers/os/mistral_v2.py +++ /dev/null @@ -1,72 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - -from senlin.drivers import base -from senlin.drivers import sdk - - -class MistralClient(base.DriverBase): - """Mistral V2 driver.""" - - def __init__(self, params): - super(MistralClient, self).__init__(params) - self.conn = sdk.create_connection(params, service_type='workflow') - self.session = self.conn.session - - @sdk.translate_exception - def workflow_create(self, definition, scope): - attrs = { - 'definition': definition, - 'scope': scope - } - return self.conn.workflow.create_workflow(**attrs) - - @sdk.translate_exception - def workflow_delete(self, workflow, ignore_missing=True): - res = self.conn.workflow.delete_workflow( - workflow, ignore_missing=ignore_missing) - return res - - @sdk.translate_exception - def workflow_find(self, name_or_id, ignore_missing=True): - res = self.conn.workflow.find_workflow( - name_or_id, ignore_missing=ignore_missing) - return res - - @sdk.translate_exception - def execution_create(self, name, inputs): - attrs = { - 'workflow_name': name, - 'input': inputs - } - return self.conn.workflow.create_execution(**attrs) - - @sdk.translate_exception - def execution_delete(self, execution, ignore_missing=True): - res = self.conn.workflow.delete_execution( - execution, ignore_missing=ignore_missing) - return res - - @sdk.translate_exception - def wait_for_execution(self, execution, status='SUCCESS', - failures=['ERROR'], interval=2, - timeout=None): - """Wait for execution creation complete""" - if timeout is None: - timeout = cfg.CONF.default_action_timeout - - execution_obj = self.conn.workflow.find_execution(execution, False) - self.conn.workflow.wait_for_status(execution_obj, status, failures, - interval, timeout) - return diff --git a/senlin/drivers/os/neutron_v2.py b/senlin/drivers/os/neutron_v2.py deleted file mode 100644 index 7b37b86cf..000000000 --- a/senlin/drivers/os/neutron_v2.py +++ /dev/null @@ -1,190 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
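In the NeutronClient below, note that network_get deliberately avoids find_network for non-UUID arguments: Neutron allows duplicate network names, so the driver lists by name and picks the first match client-side. Typical security-group usage looks like this sketch (names are placeholders):

    from senlin.drivers.os import neutron_v2

    params = {'auth_url': 'http://keystone:5000/v3'}  # hypothetical creds
    nc = neutron_v2.NeutronClient(params)

    sg = nc.security_group_create('senlin-demo-sg', description='demo')
    # With the defaults declared below, this opens TCP/22 ingress from
    # 0.0.0.0/0; port_range_max falls back to port_range_min when omitted.
    nc.security_group_rule_create(sg.id, 22)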
- -from oslo_utils import uuidutils - -from senlin.drivers import base -from senlin.drivers import sdk - - -class NeutronClient(base.DriverBase): - """Neutron V2 driver.""" - - def __init__(self, params): - super(NeutronClient, self).__init__(params) - self.conn = sdk.create_connection(params, service_type='network') - - @sdk.translate_exception - def network_get(self, name_or_id, ignore_missing=False): - # There are cases where network have the same names - # we have to do client side search by ourselves - if uuidutils.is_uuid_like(name_or_id): - return self.conn.network.find_network(name_or_id, ignore_missing) - - networks = [n for n in self.conn.network.networks(name=name_or_id)] - if len(networks) > 0: - return networks[0] - return None - - @sdk.translate_exception - def network_create(self, **attr): - network = self.conn.network.create_network(**attr) - return network - - @sdk.translate_exception - def network_delete(self, network, ignore_missing=False): - ret = self.conn.network.delete_network( - network, ignore_missing=ignore_missing) - return ret - - @sdk.translate_exception - def port_find(self, name_or_id, ignore_missing=False): - port = self.conn.network.find_port(name_or_id, ignore_missing) - return port - - @sdk.translate_exception - def security_group_find(self, name_or_id, ignore_missing=False, - project_id=None): - if project_id is None: - attrs = {} - else: - attrs = {'project_id': project_id} - - sg = self.conn.network.find_security_group(name_or_id, ignore_missing, - **attrs) - return sg - - @sdk.translate_exception - def security_group_create(self, name, description=''): - attr = { - 'name': name, - 'description': description, - } - sg = self.conn.network.create_security_group(**attr) - return sg - - @sdk.translate_exception - def security_group_delete(self, security_group_id, ignore_missing=False): - sg = self.conn.network.delete_security_group( - security_group_id, ignore_missing) - return sg - - @sdk.translate_exception - def security_group_rule_create(self, security_group_id, port_range_min, - port_range_max=None, ethertype='IPv4', - remote_ip_prefix='0.0.0.0/0', - direction='ingress', protocol='tcp'): - if port_range_max is None: - port_range_max = port_range_min - attr = { - 'direction': direction, - 'remote_ip_prefix': remote_ip_prefix, - 'protocol': protocol, - 'port_range_max': port_range_max, - 'port_range_min': port_range_min, - 'security_group_id': security_group_id, - 'ethertype': ethertype, - } - rule = self.conn.network.create_security_group_rule(**attr) - return rule - - @sdk.translate_exception - def subnet_get(self, name_or_id, ignore_missing=False): - subnet = self.conn.network.find_subnet(name_or_id, ignore_missing) - return subnet - - @sdk.translate_exception - def subnet_create(self, **attr): - subnet = self.conn.network.create_subnet(**attr) - return subnet - - @sdk.translate_exception - def router_create(self, **attr): - router = self.conn.network.create_router(**attr) - return router - - @sdk.translate_exception - def router_delete(self, router, ignore_missing=False): - ret = self.conn.network.delete_router( - router, ignore_missing=ignore_missing) - return ret - - @sdk.translate_exception - def add_interface_to_router(self, router, subnet_id=None, port_id=None): - interface = self.conn.network.add_interface_to_router( - router, subnet_id=subnet_id, port_id=port_id) - return interface - - @sdk.translate_exception - def remove_interface_from_router(self, router, subnet_id=None, - port_id=None): - interface = 
self.conn.network.remove_interface_from_router( - router, subnet_id=subnet_id, port_id=port_id) - return interface - - @sdk.translate_exception - def port_create(self, **attr): - res = self.conn.network.create_port(**attr) - return res - - @sdk.translate_exception - def port_delete(self, port, ignore_missing=True): - res = self.conn.network.delete_port( - port=port, ignore_missing=ignore_missing) - return res - - @sdk.translate_exception - def port_update(self, port, **attr): - res = self.conn.network.update_port(port, **attr) - return res - - @sdk.translate_exception - def floatingip_find(self, name_or_id, ignore_missing=False): - res = self.conn.network.find_ip( - name_or_id, ignore_missing=ignore_missing) - return res - - @sdk.translate_exception - def floatingip_list(self, fixed_ip=None, - floating_ip=None, floating_network=None, - port=None, router=None, status=None): - filters = {} - if fixed_ip: - filters['fixed_ip_address'] = fixed_ip - if floating_ip: - filters['floating_ip_address'] = floating_ip - if floating_network: - filters['floating_network_id'] = floating_network - if port: - filters['port_id'] = port - if router: - filters['router_id'] = router - if status: - filters['status'] = status - res = self.conn.network.ips(**filters) - return list(res) - - @sdk.translate_exception - def floatingip_create(self, **attr): - res = self.conn.network.create_ip(**attr) - return res - - @sdk.translate_exception - def floatingip_delete(self, floating_ip, ignore_missing=True): - res = self.conn.network.delete_ip( - floating_ip, ignore_missing=ignore_missing) - return res - - @sdk.translate_exception - def floatingip_update(self, floating_ip, **attr): - res = self.conn.network.update_ip(floating_ip, **attr) - return res diff --git a/senlin/drivers/os/nova_v2.py b/senlin/drivers/os/nova_v2.py deleted file mode 100644 index 8d0044cc9..000000000 --- a/senlin/drivers/os/nova_v2.py +++ /dev/null @@ -1,332 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
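The NovaClient below pairs mutating calls with explicit wait helpers; profiles create a server and then poll it into ACTIVE. A sketch (all resource IDs are placeholders, and the attribute names are simply forwarded to openstacksdk's create_server):

    from senlin.common import consts
    from senlin.drivers.os import nova_v2

    params = {'auth_url': 'http://keystone:5000/v3'}  # hypothetical creds
    cc = nova_v2.NovaClient(params)

    server = cc.server_create(name='demo-node',
                              flavorRef='flavor-uuid',   # hypothetical IDs
                              imageRef='image-uuid',
                              networks=[{'uuid': 'network-uuid'}])
    # Blocks until ACTIVE; raises if the server goes to ERROR or
    # cfg.CONF.default_nova_timeout is exceeded.
    cc.wait_for_server(server.id, status=consts.VS_ACTIVE)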
- -from openstack import exceptions as sdk_exc -from oslo_config import cfg -from oslo_log import log - -from senlin.common import consts -from senlin.drivers import base -from senlin.drivers import sdk - -LOG = log.getLogger(__name__) - - -class NovaClient(base.DriverBase): - """Nova V2 driver.""" - - def __init__(self, params): - super(NovaClient, self).__init__(params) - self.conn = sdk.create_connection(params, service_type='compute') - self.session = self.conn.session - - @sdk.translate_exception - def flavor_find(self, name_or_id, ignore_missing=False): - return self.conn.compute.find_flavor(name_or_id, ignore_missing) - - @sdk.translate_exception - def keypair_create(self, **attrs): - return self.conn.compute.create_keypair(**attrs) - - @sdk.translate_exception - def keypair_delete(self, name_or_id, ignore_missing=False): - return self.conn.compute.delete_keypair(name_or_id, ignore_missing) - - @sdk.translate_exception - def keypair_find(self, name_or_id, ignore_missing=False): - return self.conn.compute.find_keypair(name_or_id, ignore_missing) - - @sdk.translate_exception - def server_create(self, **attrs): - server_obj = self.conn.compute.create_server(**attrs) - return server_obj - - @sdk.translate_exception - def server_get(self, server): - return self.conn.compute.get_server(server) - - @sdk.translate_exception - def server_update(self, server, **attrs): - return self.conn.compute.update_server(server, **attrs) - - @sdk.translate_exception - def server_delete(self, server, ignore_missing=True): - return self.conn.compute.delete_server(server, - ignore_missing=ignore_missing) - - @sdk.translate_exception - def server_force_delete(self, server, ignore_missing=True): - return self.conn.compute.delete_server(server, - ignore_missing=ignore_missing, - force=True) - - @sdk.translate_exception - def server_rebuild(self, server, image, name=None, admin_password=None, - **attrs): - attrs.update({ - "name": name, - "admin_password": admin_password - }) - return self.conn.compute.rebuild_server(server, image=image, **attrs) - - @sdk.translate_exception - def server_resize(self, server, flavor): - return self.conn.compute.resize_server(server, flavor) - - @sdk.translate_exception - def server_resize_confirm(self, server): - return self.conn.compute.confirm_server_resize(server) - - @sdk.translate_exception - def server_resize_revert(self, server): - return self.conn.compute.revert_server_resize(server) - - @sdk.translate_exception - def server_reboot(self, server, reboot_type): - return self.conn.compute.reboot_server(server, reboot_type) - - @sdk.translate_exception - def server_change_password(self, server, new_password): - return self.conn.compute.change_server_password(server, new_password) - - @sdk.translate_exception - def server_pause(self, server): - return self.conn.compute.pause_server(server) - - @sdk.translate_exception - def server_unpause(self, server): - return self.conn.compute.unpause_server(server) - - @sdk.translate_exception - def server_suspend(self, server): - return self.conn.compute.suspend_server(server) - - @sdk.translate_exception - def server_resume(self, server): - return self.conn.compute.resume_server(server) - - @sdk.translate_exception - def server_lock(self, server): - return self.conn.compute.lock_server(server) - - @sdk.translate_exception - def server_unlock(self, server): - return self.conn.compute.unlock_server(server) - - @sdk.translate_exception - def server_start(self, server): - return self.conn.compute.start_server(server) - - 
@sdk.translate_exception - def server_stop(self, server): - return self.conn.compute.stop_server(server) - - @sdk.translate_exception - def server_rescue(self, server, admin_pass=None, image_ref=None): - return self.conn.compute.rescue_server(server, admin_pass=admin_pass, - image_ref=image_ref) - - @sdk.translate_exception - def server_unrescue(self, server): - return self.conn.compute.unrescue_server(server) - - @sdk.translate_exception - def server_migrate(self, server): - return self.conn.compute.migrate_server(server) - - @sdk.translate_exception - def server_evacuate(self, server, host=None, admin_pass=None, force=None): - return self.conn.compute.evacuate_server( - server, host=host, admin_pass=admin_pass, force=force) - - @sdk.translate_exception - def server_create_image(self, server, name, metadata=None): - return self.conn.compute.create_server_image(server, name, metadata) - - @sdk.translate_exception - def wait_for_server(self, server, status=consts.VS_ACTIVE, - failures=None, - interval=2, timeout=None): - """Wait for server creation complete""" - if failures is None: - failures = [consts.VS_ERROR] - if timeout is None: - timeout = cfg.CONF.default_nova_timeout - - server_obj = self.conn.compute.find_server(server, False) - self.conn.compute.wait_for_server(server_obj, status=status, - failures=failures, - interval=interval, - wait=timeout) - return - - @sdk.translate_exception - def wait_for_server_delete(self, server, timeout=None): - """Wait for server deleting complete""" - if timeout is None: - timeout = cfg.CONF.default_nova_timeout - - server_obj = self.conn.compute.find_server(server, True) - if server_obj: - self.conn.compute.wait_for_delete(server_obj, wait=timeout) - - return - - @sdk.translate_exception - def server_interface_create(self, server, **attrs): - return self.conn.compute.create_server_interface(server, **attrs) - - @sdk.translate_exception - def server_interface_list(self, server, **query): - return self.conn.compute.server_interfaces(server, **query) - - @sdk.translate_exception - def server_interface_delete(self, interface, server, ignore_missing=True): - return self.conn.compute.delete_server_interface(interface, server, - ignore_missing) - - @sdk.translate_exception - def server_metadata_get(self, server): - res = self.conn.compute.get_server_metadata(server) - return res.metadata - - def _ignore_forbidden_call(self, func, *args, **kwargs): - try: - return func(*args, **kwargs) - except sdk_exc.HttpException as exc: - if exc.status_code != 403: - raise - - @sdk.translate_exception - def server_metadata_update(self, server, metadata): - # Clean all existing metadata first - res = self.conn.compute.get_server_metadata(server) - if res.metadata: - for key in res.metadata: - self._ignore_forbidden_call( - self.conn.compute.delete_server_metadata, server, [key]) - if metadata: - for key, value in metadata.items(): - self._ignore_forbidden_call( - self.conn.compute.set_server_metadata, - server, **{key: value}) - - @sdk.translate_exception - def server_metadata_delete(self, server, keys): - self.conn.compute.delete_server_metadata(server, keys) - - @sdk.translate_exception - def availability_zone_list(self, **query): - return self.conn.compute.availability_zones(**query) - - def validate_azs(self, azs): - """check whether availability zones provided are valid. - - :param azs: A list of availability zone names for checking. - :returns: A list of zones that are found available on Nova. 
- """ - known = self.availability_zone_list() - names = [az.name for az in known if az.state['available']] - - found = [] - for az in azs: - if az in names: - found.append(az) - else: - LOG.warning("Availability zone '%s' is not available.", - az) - return found - - @sdk.translate_exception - def server_group_create(self, **attrs): - return self.conn.compute.create_server_group(**attrs) - - @sdk.translate_exception - def server_group_delete(self, server_group, ignore_missing=True): - return self.conn.compute.delete_server_group( - server_group, ignore_missing=ignore_missing) - - @sdk.translate_exception - def server_group_find(self, name_or_id, ignore_missing=True): - return self.conn.compute.find_server_group( - name_or_id, ignore_missing=ignore_missing) - - @sdk.translate_exception - def hypervisor_list(self, **query): - return self.conn.compute.hypervisors(**query) - - @sdk.translate_exception - def hypervisor_get(self, hypervisor): - return self.conn.compute.get_hypervisor(hypervisor) - - @sdk.translate_exception - def hypervisor_find(self, name_or_id, ignore_missing=False): - # try finding hypervisor by id - try: - return self.conn.compute.get_hypervisor(name_or_id) - except sdk_exc.HttpException: - # ignore http exception and instead get list and check by name - pass - - # if the hypervisor could not be found using id, search list using name - results = self.conn.compute.hypervisors( - hypervisor_hostname_pattern=name_or_id) - - result = None - for maybe_result in results: - name_value = maybe_result.name - - if name_value == name_or_id: - # Only allow one resource to be found. If we already - # found a match, raise an exception to show it. - if result is None: - result = maybe_result - else: - msg = "More than one hypervisor exists with the name '%s'." - msg = (msg % name_or_id) - raise sdk_exc.DuplicateResource(msg) - - if result is not None: - return result - - if ignore_missing: - return None - raise sdk_exc.ResourceNotFound( - "No hypervisor found for %s" % (name_or_id)) - - @sdk.translate_exception - def service_list(self): - return self.conn.compute.services() - - @sdk.translate_exception - def service_force_down(self, service): - return self.conn.compute.force_service_down(service, service.host, - service.binary) - - @sdk.translate_exception - def create_volume_attachment(self, server, **attr): - return self.conn.compute.create_volume_attachment(server, **attr) - - @sdk.translate_exception - def delete_volume_attachment(self, volume_id, server, ignore_missing=True): - return self.conn.compute.delete_volume_attachment( - volume_id, server, ignore_missing=ignore_missing - ) - - @sdk.translate_exception - def server_floatingip_associate(self, server, address): - return self.conn.compute.add_floating_ip_to_server(server, address) - - @sdk.translate_exception - def server_floatingip_disassociate(self, server, address): - return self.conn.compute.remove_floating_ip_from_server(server, - address) diff --git a/senlin/drivers/os/octavia_v2.py b/senlin/drivers/os/octavia_v2.py deleted file mode 100644 index d55d2d034..000000000 --- a/senlin/drivers/os/octavia_v2.py +++ /dev/null @@ -1,193 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.drivers import base -from senlin.drivers import sdk - - -class OctaviaClient(base.DriverBase): - """Octavia v2 client""" - - def __init__(self, params): - super(OctaviaClient, self).__init__(params) - self.conn = sdk.create_connection(params, service_type='load-balancer') - - @sdk.translate_exception - def loadbalancer_get(self, name_or_id, ignore_missing=False, - show_deleted=False): - lb = self.conn.load_balancer.find_load_balancer(name_or_id, - ignore_missing) - # TODO(liyi) - # It's unreasonable for octavia don't support filter deleted - # loadbalancers. So if supported, we need to change the function. - if lb and not show_deleted and lb.provisioning_status == 'DELETED': - lb = None - - return lb - - @sdk.translate_exception - def loadbalancer_create(self, vip_subnet_id=None, vip_network_id=None, - vip_address=None, admin_state_up=True, name=None, - description=None, availability_zone=None, - flavor_id=None): - - kwargs = { - 'admin_state_up': admin_state_up, - } - - if vip_subnet_id is not None: - kwargs['vip_subnet_id'] = vip_subnet_id - if vip_network_id is not None: - kwargs['vip_network_id'] = vip_network_id - if vip_address is not None: - kwargs['vip_address'] = vip_address - if name is not None: - kwargs['name'] = name - if description is not None: - kwargs['description'] = description - if availability_zone is not None: - kwargs['availability_zone'] = availability_zone - if flavor_id is not None: - kwargs['flavor_id'] = flavor_id - - res = self.conn.load_balancer.create_load_balancer(**kwargs) - return res - - @sdk.translate_exception - def loadbalancer_delete(self, lb_id, ignore_missing=True): - self.conn.load_balancer.delete_load_balancer( - lb_id, ignore_missing=ignore_missing) - return - - @sdk.translate_exception - def listener_create(self, loadbalancer_id, protocol, protocol_port, - connection_limit=None, - admin_state_up=True, name=None, description=None): - - kwargs = { - 'loadbalancer_id': loadbalancer_id, - 'protocol': protocol, - 'protocol_port': protocol_port, - 'admin_state_up': admin_state_up, - } - - if connection_limit is not None: - kwargs['connection_limit'] = connection_limit - if name is not None: - kwargs['name'] = name - if description is not None: - kwargs['description'] = description - - res = self.conn.load_balancer.create_listener(**kwargs) - return res - - @sdk.translate_exception - def listener_delete(self, listener_id, ignore_missing=True): - self.conn.load_balancer.delete_listener( - listener_id, ignore_missing=ignore_missing) - return - - @sdk.translate_exception - def pool_create(self, lb_algorithm, listener_id, protocol, - session_persistence, admin_state_up=True, - name=None, description=None): - - # Remove cookie_name when type not equal to APP_COOKIE - if session_persistence.get('type') != 'APP_COOKIE': - session_persistence.pop('cookie_name', None) - # When type=NONE set session_persistence to None - if session_persistence.get('type') in ('NONE', None): - session_persistence = None - - kwargs = { - 'lb_algorithm': lb_algorithm, - 'listener_id': listener_id, - 'protocol': protocol, - 'session_persistence': 
session_persistence, - 'admin_state_up': admin_state_up, - } - - if name is not None: - kwargs['name'] = name - if description is not None: - kwargs['description'] = description - - res = self.conn.load_balancer.create_pool(**kwargs) - return res - - @sdk.translate_exception - def pool_delete(self, pool_id, ignore_missing=True): - self.conn.load_balancer.delete_pool( - pool_id, ignore_missing=ignore_missing) - return - - @sdk.translate_exception - def pool_member_create(self, name, pool_id, address, protocol_port, - subnet_id, weight=None, admin_state_up=True): - - kwargs = { - 'name': name, - 'address': address, - 'protocol_port': protocol_port, - 'admin_state_up': admin_state_up, - 'subnet_id': subnet_id, - } - - if weight is not None: - kwargs['weight'] = weight - - res = self.conn.load_balancer.create_member(pool_id, **kwargs) - return res - - @sdk.translate_exception - def pool_member_delete(self, pool_id, member_id, ignore_missing=True): - self.conn.load_balancer.delete_member( - member_id, pool_id, ignore_missing=ignore_missing) - return - - @sdk.translate_exception - def healthmonitor_create(self, hm_type, delay, timeout, max_retries, - pool_id, admin_state_up=True, - http_method=None, url_path=None, - expected_codes=None): - kwargs = { - 'type': hm_type, - 'delay': delay, - 'timeout': timeout, - 'max_retries': max_retries, - 'pool_id': pool_id, - 'admin_state_up': admin_state_up, - } - - # TODO(anyone): verify if this is correct - if hm_type == 'HTTP': - if http_method is not None: - kwargs['http_method'] = http_method - if url_path is not None: - kwargs['url_path'] = url_path - if expected_codes is not None: - kwargs['expected_codes'] = expected_codes - - res = self.conn.load_balancer.create_health_monitor(**kwargs) - return res - - @sdk.translate_exception - def healthmonitor_delete(self, hm_id, ignore_missing=True): - self.conn.load_balancer.delete_health_monitor( - hm_id, ignore_missing=ignore_missing) - return - - @sdk.translate_exception - def find_flavor(self, flavor_id, ignore_missing=False): - self.conn.load_balancer.find_flavor( - flavor_id, ignore_missing=ignore_missing) - return diff --git a/senlin/drivers/os/zaqar_v2.py b/senlin/drivers/os/zaqar_v2.py deleted file mode 100644 index d9ff20ad2..000000000 --- a/senlin/drivers/os/zaqar_v2.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
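The ZaqarClient below backs Senlin's message-type receivers: requests are posted into a queue, and a consumer claims and processes them. A minimal post-and-claim sketch (queue name, claim parameters, and message body are placeholders, and the keyword names are assumptions about what the SDK's messaging proxy accepts):

    from senlin.drivers.os import zaqar_v2

    params = {'auth_url': 'http://keystone:5000/v3'}  # hypothetical creds
    zc = zaqar_v2.ZaqarClient(params)

    queue = 'senlin-demo-queue'
    if not zc.queue_exists(queue):
        zc.queue_create(name=queue)
    zc.message_post(queue, {'body': {'action': 'CLUSTER_SCALE_OUT'}})
    # Claim pending messages for processing; ttl/grace are in seconds.
    claim = zc.claim_create(queue, ttl=300, grace=60)
    # ... process the claimed messages, then release the claim ...
    zc.claim_delete(queue, claim)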
- -from openstack import exceptions as sdk_exc - -from senlin.drivers import base -from senlin.drivers import sdk - - -class ZaqarClient(base.DriverBase): - """Zaqar V2 driver.""" - - def __init__(self, params): - super(ZaqarClient, self).__init__(params) - self.conn = sdk.create_connection(params, service_type='messaging') - self.session = self.conn.session - - @sdk.translate_exception - def queue_create(self, **attrs): - return self.conn.message.create_queue(**attrs) - - @sdk.translate_exception - def queue_exists(self, queue_name): - try: - self.conn.message.get_queue(queue_name) - return True - except sdk_exc.ResourceNotFound: - return False - - @sdk.translate_exception - def queue_delete(self, queue, ignore_missing=True): - return self.conn.message.delete_queue(queue, ignore_missing) - - @sdk.translate_exception - def subscription_create(self, queue_name, **attrs): - return self.conn.message.create_subscription(queue_name, **attrs) - - @sdk.translate_exception - def subscription_delete(self, queue_name, subscription, - ignore_missing=True): - return self.conn.message.delete_subscription(queue_name, subscription, - ignore_missing) - - @sdk.translate_exception - def claim_create(self, queue_name, **attrs): - return self.conn.message.create_claim(queue_name, **attrs) - - @sdk.translate_exception - def claim_delete(self, queue_name, claim, ignore_missing=True): - return self.conn.message.delete_claim(queue_name, claim, - ignore_missing) - - @sdk.translate_exception - def message_delete(self, queue_name, message, claim_id=None, - ignore_missing=True): - return self.conn.message.delete_message(queue_name, message, - claim_id, ignore_missing) - - @sdk.translate_exception - def message_post(self, queue_name, message): - return self.conn.message.post_message(queue_name, message) diff --git a/senlin/drivers/sdk.py b/senlin/drivers/sdk.py deleted file mode 100644 index ccf3b0970..000000000 --- a/senlin/drivers/sdk.py +++ /dev/null @@ -1,194 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
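Everything in sdk.py below exists to normalize errors: driver methods are wrapped with translate_exception, which routes any SDK, HTTP, or requests-level failure through parse_exception and re-raises it as senlin.common.exception.InternalError carrying an HTTP-style code. From a caller's perspective (a sketch; the server ID is a placeholder):

    from senlin.common import exception as senlin_exc
    from senlin.drivers.os import nova_v2

    params = {'auth_url': 'http://keystone:5000/v3'}  # hypothetical creds
    cc = nova_v2.NovaClient(params)
    try:
        cc.server_get('no-such-server-id')
    except senlin_exc.InternalError as ex:
        # The SDK's ResourceNotFound surfaces here with code 404 folded
        # into the message; raw openstacksdk exceptions never leak out.
        print(str(ex))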
- -""" -SDK Client -""" -import sys - -import functools -import openstack -from openstack import exceptions as sdk_exc -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from requests import exceptions as req_exc - -from senlin.common import context -from senlin.common import exception as senlin_exc -from senlin import version - -USER_AGENT = 'senlin' -exc = sdk_exc -LOG = logging.getLogger(__name__) - -openstack.enable_logging(debug=False, stream=sys.stdout) - - -def parse_exception(ex): - """Parse exception code and yield useful information.""" - code = 500 - - if isinstance(ex, sdk_exc.HttpException): - # some exceptions don't contain status_code - if hasattr(ex, "status_code") and ex.status_code is not None: - code = ex.status_code - elif hasattr(ex, "http_status") and ex.http_status is not None: - code = ex.http_status - - message = str(ex) - data = {} - if ex.details is None and ex.response is not None: - data = ex.response.json() - else: - try: - data = jsonutils.loads(ex.details) - except Exception: - # Some exceptions don't have details record or - # are not in JSON format - pass - - # try dig more into the exception record - # usually 'data' has two types of format : - # type1: {"forbidden": {"message": "error message", "code": 403} - # type2: {"code": 404, "error": { "message": "not found"}} - if data: - code = data.get('code', code) - message = data.get('message', message) - error = data.get('error', None) - if error: - code = data.get('code', code) - message = data['error'].get('message', message) - else: - for value in data.values(): - code = value.get('code', code) - message = value.get('message', message) - - elif isinstance(ex, sdk_exc.SDKException): - # Besides HttpException there are some other exceptions like - # ResourceTimeout can be raised from SDK, handle them here. - message = str(ex) - elif isinstance(ex, req_exc.RequestException): - # Exceptions that are not captured by SDK - code = ex.errno - message = str(ex) - else: - # This could be a generic exception or something we don't understand - message = str(ex) - - if code >= 500 or code in (400, 401, 403): - LOG.error(message) - else: - LOG.info(message) - - raise senlin_exc.InternalError(code=code, message=message) - - -def translate_exception(func): - """Decorator for exception translation.""" - - @functools.wraps(func) - def invoke_with_catch(driver, *args, **kwargs): - try: - return func(driver, *args, **kwargs) - except Exception as ex: - raise parse_exception(ex) - - return invoke_with_catch - - -def create_connection(params=None, service_type='identity'): - """Create a connection to SDK service client.""" - params = params or {} - params.setdefault('region_name', cfg.CONF.default_region_name) - params.setdefault('identity_api_version', '3') - params.setdefault('messaging_api_version', '2') - - if 'token' in params: - # NOTE(daiplg): If existing token is provided, use admin_token plugin - # to authenticate to avoid fetching service catalog or determining - # scope info because of: - # https://bugs.launchpad.net/keystone/+bug/1959674 - # Refer: keystoneauth1.loading._plugins.admin_token.AdminToken - params['auth_type'] = 'admin_token' - if 'endpoint' not in params: - # NOTE(daiplg): Because there is no service catalog the endpoint - # that is supplied with initialization is used for all operations - # performed with this plugin so must be the full base URL to - # an actual service. 
- service_credentials = context.get_service_credentials() or {} - admin_connection = _create_connection(service_credentials) - - region_name = params['region_name'] - interface = service_credentials.get('interface', 'public') - - temp_adapter = admin_connection.config.get_session_client( - service_type=service_type, - region_name=region_name, - allow_version_hack=True, - ) - params['endpoint'] = temp_adapter.get_endpoint( - region_name=region_name, - interface=interface - ) - return _create_connection(params) - - -def _create_connection(params=None): - """Create a connection to SDK service client.""" - params = params or {} - try: - connection = openstack.connect( - load_envvars=False, - load_yaml_config=False, - insecure=not cfg.CONF.authentication.verify_ssl, - cafile=cfg.CONF.authentication.cafile, - cert=cfg.CONF.authentication.certfile, - key=cfg.CONF.authentication.keyfile, - app_name=USER_AGENT, - app_version=version.version_info.version_string(), - **params, - ) - except Exception as ex: - raise parse_exception(ex) - return connection - - -def authenticate(**kwargs): - """Authenticate using openstack sdk based on user credential""" - - conn = create_connection(kwargs) - access_info = { - 'token': conn.session.get_token(), - 'user_id': conn.session.get_user_id(), - 'project_id': conn.session.get_project_id() - } - - return access_info - - -class FakeResourceObject(object): - """Generate a fake SDK resource object based on given dictionary""" - - def __init__(self, params): - for key in params: - setattr(self, key, params[key]) - - def to_dict(self): - """Override this function in subclass to handle special attributes""" - data = {} - for attr in dir(self): - if not attr.startswith('__'): - # Exclude built-in attributes of python object - data[attr] = getattr(self, attr) - - return data diff --git a/senlin/engine/__init__.py b/senlin/engine/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/engine/actions/__init__.py b/senlin/engine/actions/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/engine/actions/base.py b/senlin/engine/actions/base.py deleted file mode 100644 index 43e928c1e..000000000 --- a/senlin/engine/actions/base.py +++ /dev/null @@ -1,692 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
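The Action class that opens the engine module below uses __new__ as a factory: the prefix of the action name (CLUSTER_*, NODE_*, anything else) selects which subclass is actually instantiated, so callers always construct through the base class. The same pattern in isolation (a self-contained toy with made-up names):

    class BaseAction:
        def __new__(cls, name, **kwargs):
            # Dispatch only when constructing through the base class.
            if cls is not BaseAction:
                return super().__new__(cls)
            prefix = name.split('_')[0]
            impl = {'CLUSTER': ClusterAction,
                    'NODE': NodeAction}.get(prefix, CustomAction)
            return super().__new__(impl)

        def __init__(self, name, **kwargs):
            self.name = name

    class ClusterAction(BaseAction):
        pass

    class NodeAction(BaseAction):
        pass

    class CustomAction(BaseAction):
        pass

    assert type(BaseAction('CLUSTER_CREATE')) is ClusterAction
    assert type(BaseAction('NODE_DELETE')) is NodeAction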
-
-import eventlet
-import time
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import timeutils
-
-from senlin.common import consts
-from senlin.common import context as req_context
-from senlin.common import exception
-from senlin.common import utils
-from senlin.engine import dispatcher
-from senlin.engine import event as EVENT
-from senlin.objects import action as ao
-from senlin.objects import cluster_lock as cl
-from senlin.objects import cluster_policy as cpo
-from senlin.objects import dependency as dobj
-from senlin.objects import node_lock as nl
-from senlin.policies import base as policy_mod
-
-wallclock = time.time
-LOG = logging.getLogger(__name__)
-
-
-class Action(object):
-    """An action can be performed on a cluster or a node of a cluster."""
-
-    RETURNS = (
-        RES_OK, RES_ERROR, RES_RETRY, RES_CANCEL, RES_TIMEOUT,
-        RES_LIFECYCLE_COMPLETE, RES_LIFECYCLE_HOOK_TIMEOUT,
-    ) = (
-        'OK', 'ERROR', 'RETRY', 'CANCEL', 'TIMEOUT', 'LIFECYCLE_COMPLETE',
-        'LIFECYCLE_HOOK_TIMEOUT'
-    )
-
-    # Action status definitions:
-    #  INIT: Not ready to be executed because fields are being modified,
-    #        or dependencies on other actions are being analyzed.
-    #  WAITING: Waiting for dependent actions to complete.
-    #  READY: Initialized and ready to be executed by a worker.
-    #  RUNNING: Being executed by a worker thread.
-    #  SUSPENDED: Suspended by a SUSPEND signal.
-    #  SUCCEEDED: Completed with success.
-    #  FAILED: Completed with failure.
-    #  CANCELLED: Action cancelled because worker thread was cancelled.
-    #  WAITING_LIFECYCLE_COMPLETION: Waiting for a lifecycle hook to
-    #        complete.
-    STATUSES = (
-        INIT, WAITING, READY, RUNNING, SUSPENDED,
-        SUCCEEDED, FAILED, CANCELLED, WAITING_LIFECYCLE_COMPLETION
-    ) = (
-        'INIT', 'WAITING', 'READY', 'RUNNING', 'SUSPENDED',
-        'SUCCEEDED', 'FAILED', 'CANCELLED', 'WAITING_LIFECYCLE_COMPLETION'
-    )
-
-    # Signal commands
-    COMMANDS = (
-        SIG_CANCEL, SIG_SUSPEND, SIG_RESUME,
-    ) = (
-        'CANCEL', 'SUSPEND', 'RESUME',
-    )
-
-    def __new__(cls, target, action, ctx, **kwargs):
-        if cls != Action:
-            return super(Action, cls).__new__(cls)
-
-        target_type = action.split('_')[0]
-        if target_type == 'CLUSTER':
-            from senlin.engine.actions import cluster_action
-            ActionClass = cluster_action.ClusterAction
-        elif target_type == 'NODE':
-            from senlin.engine.actions import node_action
-            ActionClass = node_action.NodeAction
-        else:
-            from senlin.engine.actions import custom_action
-            ActionClass = custom_action.CustomAction
-
-        return super(Action, cls).__new__(ActionClass)
-
-    def __init__(self, target, action, ctx, **kwargs):
-        # The context will be persisted into the database so that any worker
-        # thread can pick the action up and execute it on behalf of the
-        # initiator.
-
-        self.id = kwargs.get('id', None)
-        self.name = kwargs.get('name', '')
-        self.cluster_id = kwargs.get('cluster_id', '')
-
-        self.context = ctx
-        self.user = ctx.user_id
-        self.project = ctx.project_id
-        self.domain = ctx.domain_id
-
-        self.action = action
-        self.target = target
-
-        # Why this action is fired; it can be the UUID of another action.
-        self.cause = kwargs.get('cause', '')
-
-        # Owner can be a UUID-format ID for the worker that is currently
-        # working on the action. It also serves as a lock.
-        self.owner = kwargs.get('owner', None)
-
-        # An action may need to be executed repeatedly; 'interval' is the
-        # time in seconds between two consecutive executions.
-        # A value of -1 indicates that this action is only executed once.
-        self.interval = kwargs.get('interval', -1)
-
-        # Start time can be an absolute time or a time relative to another
-        # action. E.g.
- # - '2014-12-18 08:41:39.908569' - # - 'AFTER: 57292917-af90-4c45-9457-34777d939d4d' - # - 'WHEN: 0265f93b-b1d7-421f-b5ad-cb83de2f559d' - self.start_time = kwargs.get('start_time', None) - self.end_time = kwargs.get('end_time', None) - - # Timeout is a placeholder in case some actions may linger too long - self.timeout = kwargs.get('timeout', cfg.CONF.default_action_timeout) - - # Return code, useful when action is not automatically deleted - # after execution - self.status = kwargs.get('status', self.INIT) - self.status_reason = kwargs.get('status_reason', '') - - # All parameters are passed in using keyword arguments which is - # a dictionary stored as JSON in DB - self.inputs = kwargs.get('inputs', {}) - self.outputs = kwargs.get('outputs', {}) - - self.created_at = kwargs.get('created_at', None) - self.updated_at = kwargs.get('updated_at', None) - - self.data = kwargs.get('data', {}) - - def store(self, ctx): - """Store the action record into database table. - - :param ctx: An instance of the request context. - :return: The ID of the stored object. - """ - - timestamp = timeutils.utcnow(True) - - values = { - 'name': self.name, - 'cluster_id': self.cluster_id, - 'context': self.context.to_dict(), - 'target': self.target, - 'action': self.action, - 'cause': self.cause, - 'owner': self.owner, - 'interval': self.interval, - 'start_time': self.start_time, - 'end_time': self.end_time, - 'timeout': self.timeout, - 'status': self.status, - 'status_reason': self.status_reason, - 'inputs': self.inputs, - 'outputs': self.outputs, - 'created_at': self.created_at, - 'updated_at': self.updated_at, - 'data': self.data, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - } - - if self.id: - self.updated_at = timestamp - values['updated_at'] = timestamp - ao.Action.update(ctx, self.id, values) - else: - self.created_at = timestamp - values['created_at'] = timestamp - action = ao.Action.create(ctx, values) - self.id = action.id - - return self.id - - @classmethod - def _from_object(cls, obj): - """Construct an action from database object. - - :param obj: a DB action object that contains all fields. - :return: An `Action` object deserialized from the DB action object. - """ - ctx = req_context.RequestContext.from_dict(obj.context) - kwargs = { - 'id': obj.id, - 'name': obj.name, - 'cluster_id': obj.cluster_id, - 'cause': obj.cause, - 'owner': obj.owner, - 'interval': obj.interval, - 'start_time': obj.start_time, - 'end_time': obj.end_time, - 'timeout': obj.timeout, - 'status': obj.status, - 'status_reason': obj.status_reason, - 'inputs': obj.inputs or {}, - 'outputs': obj.outputs or {}, - 'created_at': obj.created_at, - 'updated_at': obj.updated_at, - 'data': obj.data, - } - - target_type = obj.action.split('_')[0] - if target_type == 'CLUSTER': - from senlin.engine.actions import cluster_action - ActionClass = cluster_action.ClusterAction - elif target_type == 'NODE': - from senlin.engine.actions import node_action - ActionClass = node_action.NodeAction - else: - from senlin.engine.actions import custom_action - ActionClass = custom_action.CustomAction - - return ActionClass(obj.target, obj.action, ctx, **kwargs) - - @classmethod - def load(cls, ctx, action_id=None, db_action=None, project_safe=True): - """Retrieve an action from database. - - :param ctx: Instance of request context. - :param action_id: An UUID for the action to deserialize. - :param db_action: An action object for the action to deserialize. - :return: A `Action` object instance. 
- """ - if db_action is None: - db_action = ao.Action.get(ctx, action_id, - project_safe=project_safe) - if db_action is None: - raise exception.ResourceNotFound(type='action', id=action_id) - - return cls._from_object(db_action) - - @classmethod - def create(cls, ctx, target, action, force=False, **kwargs): - """Create an action object. - - :param ctx: The requesting context. - :param target: The ID of the target cluster/node. - :param action: Name of the action. - :param force: Skip checking locks/conflicts - :param dict kwargs: Other keyword arguments for the action. - :return: ID of the action created. - """ - if not force: - cls._check_action_lock(target, action) - cls._check_conflicting_actions(ctx, target, action) - - params = { - 'user_id': ctx.user_id, - 'project_id': ctx.project_id, - 'domain_id': ctx.domain_id, - 'is_admin': ctx.is_admin, - 'request_id': ctx.request_id, - 'trusts': ctx.trusts, - } - c = req_context.RequestContext.from_dict(params) - - if action in consts.CLUSTER_SCALE_ACTIONS: - Action.validate_scaling_action(c, target, action) - - obj = cls(target, action, c, **kwargs) - return obj.store(ctx) - - @staticmethod - def _check_action_lock(target, action): - if action in consts.LOCK_BYPASS_ACTIONS: - return - elif (action in list(consts.CLUSTER_ACTION_NAMES) and - cl.ClusterLock.is_locked(target)): - raise exception.ResourceIsLocked( - action=action, type='cluster', id=target) - elif (action in list(consts.NODE_ACTION_NAMES) and - nl.NodeLock.is_locked(target)): - raise exception.ResourceIsLocked( - action=action, type='node', id=target) - - @staticmethod - def _check_conflicting_actions(ctx, target, action): - conflict_actions = ao.Action.get_all_active_by_target(ctx, target) - # Ignore conflicting actions on deletes. - if not conflict_actions or action in consts.CONFLICT_BYPASS_ACTIONS: - return - else: - action_ids = [a['id'] for a in conflict_actions] - raise exception.ActionConflict( - type=action, target=target, actions=",".join(action_ids)) - - @classmethod - def delete(cls, ctx, action_id): - """Delete an action from database. - - :param ctx: An instance of the request context. - :param action_id: The UUID of the target action to be deleted. - :return: Nothing. - """ - ao.Action.delete(ctx, action_id) - - def signal(self, cmd): - """Send a signal to the action. - - :param cmd: One of the command word defined in self.COMMANDS. - :returns: None - """ - if cmd not in self.COMMANDS: - return - - if cmd == self.SIG_CANCEL: - expected = (self.INIT, self.WAITING, self.READY, self.RUNNING, - self.WAITING_LIFECYCLE_COMPLETION) - elif cmd == self.SIG_SUSPEND: - expected = (self.RUNNING) - else: # SIG_RESUME - expected = (self.SUSPENDED) - - if self.status not in expected: - LOG.info("Action (%(id)s) is in status (%(actual)s) while " - "expected status must be one of (%(expected)s).", - dict(id=self.id[:8], expected=expected, - actual=self.status)) - return - - ao.Action.signal(self.context, self.id, cmd) - - def signal_cancel(self): - """Signal the action and any depended actions to cancel. - - If the action or any depended actions are in status - 'WAITING_LIFECYCLE_COMPLETION' or 'INIT' update the status to cancelled - directly. 
-
-        :raises: `ActionImmutable` if the action is in an unchangeable state
-        """
-        expected = (self.INIT, self.WAITING, self.READY, self.RUNNING,
-                    self.WAITING_LIFECYCLE_COMPLETION)
-
-        if self.status not in expected:
-            raise exception.ActionImmutable(id=self.id[:8], expected=expected,
-                                            actual=self.status)
-
-        ao.Action.signal(self.context, self.id, self.SIG_CANCEL)
-
-        if self.status in (self.WAITING_LIFECYCLE_COMPLETION, self.INIT):
-            self.set_status(self.RES_CANCEL, 'Action execution cancelled')
-
-        depended = dobj.Dependency.get_depended(self.context, self.id)
-        if not depended:
-            return
-
-        for child in depended:
-            # Try to cancel all dependent actions
-            action = self.load(self.context, action_id=child)
-            if not action.is_cancelled():
-                ao.Action.signal(self.context, child, self.SIG_CANCEL)
-            # If the action is in WAITING_LIFECYCLE_COMPLETION or INIT, update
-            # the status to CANCELLED immediately.
-            if action.status in (action.WAITING_LIFECYCLE_COMPLETION,
-                                 action.INIT):
-                action.set_status(action.RES_CANCEL,
-                                  'Action execution cancelled')
-
-    def force_cancel(self):
-        """Force the action and any depended actions to cancel.
-
-        If the action or any depended actions are in status 'INIT', 'WAITING',
-        'READY', 'RUNNING', or 'WAITING_LIFECYCLE_COMPLETION', immediately
-        update their status to cancelled. This should only be used if an
-        action is stuck/dead and has no expectation of ever completing.
-        """
-        LOG.debug('Forcing action %s to cancel.', self.id)
-        self.set_status(self.RES_CANCEL, 'Action execution force cancelled')
-
-        self.release_lock()
-
-        depended = dobj.Dependency.get_depended(self.context, self.id)
-        if not depended:
-            return
-
-        for child in depended:
-            # Force cancel all dependent actions
-            action = self.load(self.context, action_id=child)
-            if action.status in (action.INIT, action.WAITING, action.READY,
-                                 action.RUNNING,
-                                 action.WAITING_LIFECYCLE_COMPLETION):
-                LOG.debug('Forcing action %s to cancel.', action.id)
-                action.set_status(action.RES_CANCEL,
-                                  'Action execution force cancelled')
-                action.release_lock()
-
-    def execute(self, **kwargs):
-        """Execute the action.
-
-        In theory, the action encapsulates all information needed for
-        execution. 'kwargs' may specify additional parameters.
-
-        :param kwargs: additional parameters that may override the default
-            properties stored in the action record.
- """ - raise NotImplementedError - - def release_lock(self): - """Release the lock associated with the action.""" - raise NotImplementedError - - def set_status(self, result, reason=None): - """Set action status based on return value from execute.""" - - timestamp = wallclock() - - if result == self.RES_OK: - status = self.SUCCEEDED - ao.Action.mark_succeeded(self.context, self.id, timestamp) - - elif result == self.RES_ERROR: - status = self.FAILED - ao.Action.mark_failed(self.context, self.id, timestamp, - reason or 'ERROR') - - elif result == self.RES_TIMEOUT: - status = self.FAILED - ao.Action.mark_failed(self.context, self.id, timestamp, - reason or 'TIMEOUT') - - elif result == self.RES_CANCEL: - status = self.CANCELLED - ao.Action.mark_cancelled(self.context, self.id, timestamp) - - else: # result == self.RES_RETRY: - retries = self.data.get('retries', 0) - # Action failed at the moment, but can be retried - # retries time is configurable - if retries < cfg.CONF.lock_retry_times: - status = self.READY - retries += 1 - - self.data.update({'retries': retries}) - ao.Action.abandon(self.context, self.id, {'data': self.data}) - # sleep for a while - eventlet.sleep(cfg.CONF.lock_retry_interval) - dispatcher.start_action(self.id) - else: - status = self.RES_ERROR - if not reason: - reason = ('Exceeded maximum number of retries (%d)' - '') % cfg.CONF.lock_retry_times - ao.Action.mark_failed(self.context, self.id, timestamp, reason) - - if status == self.SUCCEEDED: - EVENT.info(self, consts.PHASE_END, reason or 'SUCCEEDED') - elif status == self.READY: - EVENT.warning(self, consts.PHASE_ERROR, reason or 'RETRY') - else: - EVENT.error(self, consts.PHASE_ERROR, reason or 'ERROR') - - self.status = status - self.status_reason = reason - - def get_status(self): - timestamp = wallclock() - status = ao.Action.check_status(self.context, self.id, timestamp) - self.status = status - return status - - def is_timeout(self, timeout=None): - if timeout is None: - timeout = self.timeout - if self.start_time is None: - return False - time_elapse = wallclock() - self.start_time - return time_elapse > timeout - - def _check_signal(self): - # Check timeout first, if true, return timeout message - if self.timeout is not None and self.is_timeout(): - EVENT.debug(self, consts.PHASE_ERROR, 'TIMEOUT') - return self.RES_TIMEOUT - - result = ao.Action.signal_query(self.context, self.id) - return result - - def is_cancelled(self): - return self._check_signal() == self.SIG_CANCEL - - def is_suspended(self): - return self._check_signal() == self.SIG_SUSPEND - - def is_resumed(self): - return self._check_signal() == self.SIG_RESUME - - def policy_check(self, cluster_id, target): - """Check all policies attached to cluster and give result. - - :param cluster_id: The ID of the cluster to which the policy is - attached. - :param target: A tuple of ('when', action_name) - :return: A dictionary that contains the check result. 
- """ - - if target not in ['BEFORE', 'AFTER']: - return - - bindings = cpo.ClusterPolicy.get_all(self.context, cluster_id, - sort='priority', - filters={'enabled': True}) - - # default values - self.data['status'] = policy_mod.CHECK_NONE - self.data['reason'] = '' - - for pb in bindings: - policy = policy_mod.Policy.load(self.context, pb.policy_id, - project_safe=False) - - # add last_op as input for the policy so that it can be used - # during pre_op - self.inputs['last_op'] = pb.last_op - - if not policy.need_check(target, self): - continue - - if target == 'BEFORE': - method = getattr(policy, 'pre_op', None) - else: # target == 'AFTER' - method = getattr(policy, 'post_op', None) - - # call policy check function - # it will set result in data['status'] - if method is not None: - method(cluster_id, self) - - # stop policy check is one of them fails - if self.data['status'] == policy_mod.CHECK_ERROR: - reason = self.data['reason'] - self.data['reason'] = ("Failed policy '%(name)s': %(reason)s" - ) % {'name': policy.name, - 'reason': reason} - return - - self.data['status'] = policy_mod.CHECK_OK - self.data['reason'] = 'Completed policy checking.' - - return - - @staticmethod - def validate_scaling_action(ctx, cluster_id, action): - """Validate scaling action against actions table and policy cooldown. - - :param ctx: An instance of the request context. - :param cluster_id: ID of the cluster the scaling action is targeting. - :param action: Scaling action being validated. - :return: None - :raises: An exception of ``ActionCooldown`` when the action being - validated is still in cooldown based off the policy or - ``ActionConflict`` when a scaling action is already in the action - table. - """ - # Check for conflicting actions in the actions table. - conflicting_actions = Action._get_conflicting_scaling_actions( - ctx, cluster_id) - if conflicting_actions: - action_ids = [a.get('id', None) for a in conflicting_actions] - LOG.info("Unable to process %(action)s for cluster %(cluster_id)s " - "the action conflicts with %(conflicts)s", - {'action': action, - 'cluster_id': cluster_id, - 'conflicts': action_ids}) - raise exception.ActionConflict( - type=action, - target=cluster_id, - actions=",".join(action_ids)) - - # Check to see if action cooldown should be observed. - bindings = cpo.ClusterPolicy.get_all(ctx, cluster_id, - sort='priority', - filters={'enabled': True}) - for pb in bindings: - policy = policy_mod.Policy.load(ctx, pb.policy_id) - if getattr(policy, 'cooldown', None) and policy.event == action: - if pb.last_op and not timeutils.is_older_than( - pb.last_op, policy.cooldown): - LOG.info("Unable to process %(action)s for cluster " - "%(cluster_id)s the actions policy %(policy)s " - "cooldown still in progress", - {'action': action, - 'cluster_id': cluster_id, - 'policy': pb.policy_id}) - raise exception.ActionCooldown( - type=action, - cluster=cluster_id, - policy_id=pb.policy_id) - return - - @staticmethod - def _get_conflicting_scaling_actions(ctx, cluster_id): - """Check actions table for conflicting scaling actions. - - :param ctx: An instance of the request context. - :param cluster_id: ID of the cluster the scaling action is targeting. - :return: A list of conflicting actions. 
- """ - scaling_actions = ao.Action.action_list_active_scaling( - ctx, cluster_id) - if scaling_actions: - return [a.to_dict() for a in scaling_actions] - else: - return None - - def to_dict(self): - if self.id: - dep_on = dobj.Dependency.get_depended(self.context, self.id) - dep_by = dobj.Dependency.get_dependents(self.context, self.id) - else: - dep_on = [] - dep_by = [] - action_dict = { - 'id': self.id, - 'name': self.name, - 'cluster_id': self.cluster_id, - 'action': self.action, - 'target': self.target, - 'cause': self.cause, - 'owner': self.owner, - 'interval': self.interval, - 'start_time': self.start_time, - 'end_time': self.end_time, - 'timeout': self.timeout, - 'status': self.status, - 'status_reason': self.status_reason, - 'inputs': self.inputs, - 'outputs': self.outputs, - 'depends_on': dep_on, - 'depended_by': dep_by, - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at), - 'data': self.data, - 'user': self.user, - 'project': self.project, - } - return action_dict - - -def ActionProc(ctx, action_id): - """Action process.""" - - # Step 1: materialize the action object - action = Action.load(ctx, action_id=action_id, project_safe=False) - if action is None: - LOG.error('Action "%s" could not be found.', action_id) - return False - - if action.is_cancelled(): - reason = '%(action)s [%(id)s] cancelled' % { - 'action': action.action, 'id': action.id[:8]} - action.set_status(action.RES_CANCEL, reason) - LOG.info(reason) - return True - - EVENT.info(action, consts.PHASE_START, action_id[:8]) - - reason = 'Action completed' - success = True - try: - # Step 2: execute the action - result, reason = action.execute() - if result == action.RES_RETRY: - success = False - except Exception as ex: - # We catch exception here to make sure the following logics are - # executed. - result = action.RES_ERROR - reason = str(ex) - LOG.exception('Unexpected exception occurred during action ' - '%(action)s (%(id)s) execution: %(reason)s', - {'action': action.action, 'id': action.id, - 'reason': reason}) - success = False - finally: - # NOTE: locks on action is eventually released here by status update - action.set_status(result, reason) - - return success diff --git a/senlin/engine/actions/cluster_action.py b/senlin/engine/actions/cluster_action.py deleted file mode 100644 index 00c8a0473..000000000 --- a/senlin/engine/actions/cluster_action.py +++ /dev/null @@ -1,1256 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-import copy
-import eventlet
-
-from oslo_log import log as logging
-from oslo_utils import timeutils
-from osprofiler import profiler
-
-from senlin.common import consts
-from senlin.common import exception
-from senlin.common import scaleutils
-from senlin.common import utils
-from senlin.engine.actions import base
-from senlin.engine import cluster as cluster_mod
-from senlin.engine import dispatcher
-from senlin.engine import node as node_mod
-from senlin.engine.notifications import message as msg
-from senlin.engine import senlin_lock
-from senlin.objects import action as ao
-from senlin.objects import cluster as co
-from senlin.objects import cluster_policy as cp_obj
-from senlin.objects import dependency as dobj
-from senlin.objects import node as no
-from senlin.objects import receiver as receiver_obj
-from senlin.policies import base as policy_mod
-
-LOG = logging.getLogger(__name__)
-
-
-class ClusterAction(base.Action):
-    """An action that can be performed on a cluster."""
-
-    def __init__(self, target, action, context, **kwargs):
-        """Constructor for a cluster action.
-
-        :param target: ID of the target cluster.
-        :param action: Name of the action to be executed.
-        :param context: Context used when interacting with the DB layer.
-        :param dict kwargs: Other optional arguments for the action.
-        """
-        super(ClusterAction, self).__init__(target, action, context, **kwargs)
-
-        try:
-            self.entity = cluster_mod.Cluster.load(self.context, self.target)
-            self.timeout = self.entity.timeout
-        except Exception:
-            self.entity = None
-
-    def _sleep(self, period):
-        if period:
-            eventlet.sleep(period)
-
-    def _wait_for_dependents(self, lifecycle_hook_timeout=None):
-        """Wait for dependent actions to complete.
-
-        :returns: A tuple containing the result and the corresponding reason.
-        """
-        status = self.get_status()
-        while status != self.READY:
-            if status == self.FAILED:
-                reason = ('%(action)s [%(id)s] failed' % {
-                    'action': self.action, 'id': self.id[:8]})
-                LOG.debug(reason)
-                return self.RES_ERROR, reason
-
-            if self.is_cancelled():
-                # During this period, if a cancel request comes, cancel this
-                # operation immediately after signaling the children to
-                # cancel, then release the cluster lock
-                reason = ('%(action)s [%(id)s] cancelled' % {
-                    'action': self.action, 'id': self.id[:8]})
-                LOG.debug(reason)
-                return self.RES_CANCEL, reason
-
-            # When a child action is cancelled, the parent action will update
-            # its status to cancelled as well; this allows it to exit.
-            if status == self.CANCELLED:
-                if self.check_children_complete():
-                    reason = ('%(action)s [%(id)s] cancelled' % {
-                        'action': self.action, 'id': self.id[:8]})
-                    LOG.debug(reason)
-                    return self.RES_CANCEL, reason
-
-            if self.is_timeout():
-                # Action timed out, return
-                reason = ('%(action)s [%(id)s] timeout' % {
-                    'action': self.action, 'id': self.id[:8]})
-                LOG.debug(reason)
-                return self.RES_TIMEOUT, reason
-
-            if (lifecycle_hook_timeout is not None and
-                    self.is_timeout(lifecycle_hook_timeout)):
-                # if a lifecycle hook timeout is specified and the lifecycle
-                # hook timeout is reached, return
-                reason = ('%(action)s [%(id)s] lifecycle hook timeout'
-                          '') % {'action': self.action, 'id': self.id[:8]}
-                LOG.debug(reason)
-                return self.RES_LIFECYCLE_HOOK_TIMEOUT, reason
-
-            # Continue waiting (with reschedule)
-            LOG.debug('Action %s sleeping for 3 seconds', self.id)
-            self._sleep(3)
-            status = self.get_status()
-            dispatcher.start_action()
-
-        return self.RES_OK, 'All dependents ended with success'
-
-    def check_children_complete(self):
-        depended = dobj.Dependency.get_depended(self.context, self.id)
-        if not depended:
-            return True
-
-        for child in depended:
-            # Check whether all dependent actions have completed
-            action = base.Action.load(self.context, action_id=child)
-            if action.get_status() not in (action.CANCELLED, action.SUCCEEDED,
-                                           action.FAILED):
-                return False
-        return True
-
-    def _create_nodes(self, count):
-        """Utility method for node creation.
-
-        :param count: Number of nodes to create.
-        :returns: A tuple comprised of the result and reason.
-        """
-
-        if count == 0:
-            return self.RES_OK, ''
-
-        placement = self.data.get('placement', None)
-
-        nodes = []
-        child = []
-        # count >= 1
-        for m in range(count):
-            index = co.Cluster.get_next_index(self.context, self.entity.id)
-            kwargs = {
-                'index': index,
-                'metadata': {},
-                'user': self.entity.user,
-                'project': self.entity.project,
-                'domain': self.entity.domain,
-            }
-            if placement is not None:
-                # We assume placement is a list
-                kwargs['data'] = {'placement': placement['placements'][m]}
-
-            name_format = self.entity.config.get("node.name.format", "")
-            name = utils.format_node_name(name_format, self.entity, index)
-            node = node_mod.Node(name, self.entity.profile_id,
-                                 self.entity.id, context=self.context,
-                                 **kwargs)
-
-            node.store(self.context)
-            nodes.append(node)
-
-            kwargs = {
-                'name': 'node_create_%s' % node.id[:8],
-                'cluster_id': self.entity.id,
-                'cause': consts.CAUSE_DERIVED,
-            }
-            action_id = base.Action.create(self.context, node.id,
-                                           consts.NODE_CREATE, **kwargs)
-            child.append(action_id)
-
-        # Build dependency and make the new actions ready
-        dobj.Dependency.create(self.context, [a for a in child], self.id)
-        for cid in child:
-            ao.Action.update(self.context, cid,
-                             {'status': base.Action.READY})
-        dispatcher.start_action()
-
-        # Wait for node creation to complete
-        res, reason = self._wait_for_dependents()
-        if res == self.RES_OK:
-            nodes_added = [n.id for n in nodes]
-            self.outputs['nodes_added'] = nodes_added
-            creation = self.data.get('creation', {})
-            creation['nodes'] = nodes_added
-            self.data['creation'] = creation
-            for node in nodes:
-                self.entity.add_node(node)
-        else:
-            reason = 'Failed in creating nodes.'
-
-        return res, reason
-
-    @profiler.trace('ClusterAction.do_create', hide_args=False)
-    def do_create(self):
-        """Handler for the CLUSTER_CREATE action.
-
-        :returns: A tuple containing the result and the corresponding reason.
- """ - res = self.entity.do_create(self.context) - - if not res: - reason = 'Cluster creation failed.' - self.entity.set_status(self.context, consts.CS_ERROR, reason) - return self.RES_ERROR, reason - - result, reason = self._create_nodes(self.entity.desired_capacity) - - params = {} - if result == self.RES_OK: - reason = 'Cluster creation succeeded.' - params = {'created_at': timeutils.utcnow(True)} - self.entity.eval_status(self.context, consts.CLUSTER_CREATE, **params) - - return result, reason - - def _update_nodes(self, profile_id, nodes_obj): - # Get batching policy data if any - LOG.info("Updating cluster '%(cluster)s': profile='%(profile)s'.", - {'cluster': self.entity.id, 'profile': profile_id}) - plan = [] - - pd = self.data.get('update', None) - if pd: - pause_time = pd.get('pause_time') - plan = pd.get('plan') - else: - pause_time = 0 - nodes_list = [] - for node in self.entity.nodes: - nodes_list.append(node.id) - plan.append(set(nodes_list)) - - for node_set in plan: - child = [] - nodes = list(node_set) - nodes.sort() - - for node in nodes: - kwargs = { - 'name': 'node_update_%s' % node[:8], - 'cluster_id': self.entity.id, - 'cause': consts.CAUSE_DERIVED, - 'inputs': self.entity.config, - } - kwargs['inputs']['new_profile_id'] = profile_id - - action_id = base.Action.create(self.context, node, - consts.NODE_UPDATE, **kwargs) - child.append(action_id) - - if child: - dobj.Dependency.create(self.context, [c for c in child], - self.id) - for cid in child: - ao.Action.update(self.context, cid, - {'status': base.Action.READY}) - - dispatcher.start_action() - # clear the action list - child = [] - result, new_reason = self._wait_for_dependents() - if result != self.RES_OK: - self.entity.eval_status(self.context, - consts.CLUSTER_UPDATE) - return result, 'Failed in updating nodes.' - # pause time - if pause_time != 0: - self._sleep(pause_time) - - self.entity.profile_id = profile_id - self.entity.eval_status(self.context, consts.CLUSTER_UPDATE, - profile_id=profile_id, - updated_at=timeutils.utcnow(True)) - return self.RES_OK, 'Cluster update completed.' - - @profiler.trace('ClusterAction.do_update', hide_args=False) - def do_update(self): - """Handler for CLUSTER_UPDATE action. - - :returns: A tuple consisting the result and the corresponding reason. - """ - res = self.entity.do_update(self.context) - if not res: - reason = 'Cluster update failed.' - self.entity.set_status(self.context, consts.CS_ERROR, reason) - return self.RES_ERROR, reason - - config = self.inputs.get('config') - name = self.inputs.get('name') - metadata = self.inputs.get('metadata') - timeout = self.inputs.get('timeout') - profile_id = self.inputs.get('new_profile_id') - profile_only = self.inputs.get('profile_only') - - if config is not None: - # make sure config values are valid - try: - stop_timeout = config.get('cluster.stop_timeout_before_update') - if stop_timeout: - config['cluster.stop_timeout_before_update'] = int( - stop_timeout) - except Exception as e: - return self.RES_ERROR, str(e) - - self.entity.config = config - if name is not None: - self.entity.name = name - if metadata is not None: - self.entity.metadata = metadata - if timeout is not None: - self.entity.timeout = timeout - self.entity.store(self.context) - - reason = 'Cluster update completed.' 
- if profile_id is None: - self.entity.eval_status(self.context, consts.CLUSTER_UPDATE, - updated_at=timeutils.utcnow(True)) - return self.RES_OK, reason - - # profile_only's type is bool - if profile_only: - self.entity.profile_id = profile_id - self.entity.eval_status(self.context, consts.CLUSTER_UPDATE, - profile_id=profile_id, - updated_at=timeutils.utcnow(True)) - return self.RES_OK, reason - - # Update nodes with new profile - result, reason = self._update_nodes(profile_id, self.entity.nodes) - return result, reason - - def _handle_lifecycle_timeout(self, child): - for action_id, node_id in child: - status = ao.Action.check_status(self.context, action_id, 0) - if (status == consts.ACTION_WAITING_LIFECYCLE_COMPLETION): - # update action status and reset owner back to None - # so that the action will get picked up by dispatcher - ao.Action.update(self.context, action_id, - {'status': base.Action.READY, - 'owner': None}) - - def _remove_nodes_with_hook(self, action_name, node_ids, lifecycle_hook, - inputs=None): - lifecycle_hook_timeout = lifecycle_hook.get('timeout') - lifecycle_hook_type = lifecycle_hook.get('type', None) - lifecycle_hook_params = lifecycle_hook.get('params') - if lifecycle_hook_type == "zaqar": - lifecycle_hook_target = lifecycle_hook_params.get('queue') - else: - # lifecycle_hook_target = lifecycle_hook_params.get('url') - return self.RES_ERROR, ("Lifecycle hook type '%s' is not " - "implemented") % lifecycle_hook_type - child = [] - for node_id in node_ids: - kwargs = { - 'name': 'node_delete_%s' % node_id[:8], - 'cluster_id': self.entity.id, - 'cause': consts.CAUSE_DERIVED_LCH, - 'inputs': inputs or {}, - } - - action_id = base.Action.create(self.context, node_id, action_name, - **kwargs) - child.append((action_id, node_id)) - - if child: - dobj.Dependency.create(self.context, [aid for aid, nid in child], - self.id) - # lifecycle_hook_type has to be "zaqar" - # post message to zaqar - kwargs = { - 'user': self.context.user_id, - 'project': self.context.project_id, - 'domain': self.context.domain_id - } - - notifier = msg.Message(lifecycle_hook_target, **kwargs) - - child_copy = list(child) - for action_id, node_id in child_copy: - # wait lifecycle complete if node exists and is active - node = no.Node.get(self.context, node_id) - owner = None - if not node: - LOG.warning('Node %s is not found. ' - 'Skipping wait for lifecycle completion.', - node_id) - status = base.Action.READY - child.remove((action_id, node_id)) - elif node.status != consts.NS_ACTIVE or not node.physical_id: - LOG.warning('Node %s is not in ACTIVE status. 
' - 'Skipping wait for lifecycle completion.', - node_id) - status = base.Action.READY - child.remove((action_id, node_id)) - else: - status = base.Action.WAITING_LIFECYCLE_COMPLETION - # set owner for actions in waiting for lifecycle completion - # so that they will get cleaned up by dead engine gc - # if the engine dies - owner = self.owner - - ao.Action.update(self.context, action_id, - {'status': status, - 'owner': owner}) - if status == base.Action.WAITING_LIFECYCLE_COMPLETION: - notifier.post_lifecycle_hook_message( - action_id, node_id, node.physical_id, - consts.LIFECYCLE_NODE_TERMINATION) - - dispatcher.start_action() - res, reason = self._wait_for_dependents(lifecycle_hook_timeout) - - if res == self.RES_LIFECYCLE_HOOK_TIMEOUT: - self._handle_lifecycle_timeout(child) - - if res is None or res == self.RES_LIFECYCLE_HOOK_TIMEOUT: - dispatcher.start_action() - res, reason = self._wait_for_dependents() - - return res, reason - - return self.RES_OK, '' - - def _remove_nodes_normally(self, action_name, node_ids, inputs=None): - child = [] - for node_id in node_ids: - kwargs = { - 'name': 'node_delete_%s' % node_id[:8], - 'cluster_id': self.entity.id, - 'cause': consts.CAUSE_DERIVED, - 'inputs': inputs or {}, - } - - action_id = base.Action.create(self.context, node_id, action_name, - **kwargs) - child.append((action_id, node_id)) - - if child: - dobj.Dependency.create(self.context, [aid for aid, nid in child], - self.id) - for action_id, node_id in child: - ao.Action.update(self.context, action_id, - {'status': base.Action.READY}) - - dispatcher.start_action() - res, reason = self._wait_for_dependents() - return res, reason - - return self.RES_OK, '' - - def _delete_nodes(self, node_ids): - action_name = consts.NODE_DELETE - - pd = self.data.get('deletion', None) - if pd is not None: - destroy = pd.get('destroy_after_deletion', True) - if destroy is False: - action_name = consts.NODE_LEAVE - - stop_node_before_delete = self.entity.config.get( - "cluster.stop_node_before_delete", False) - - # get lifecycle hook properties if specified - lifecycle_hook = self.data.get('hooks') - if lifecycle_hook: - if stop_node_before_delete: - # set update_parent_status to False so that a failure in stop - # operation is ignored and the parent status is not changed - res, reason = self._remove_nodes_with_hook( - consts.NODE_OPERATION, node_ids, lifecycle_hook, - {'operation': 'stop', 'update_parent_status': False}) - if res != self.RES_OK: - LOG.warning('Failure while stopping nodes. ' - 'Proceed to delete nodes.') - res, reason = self._remove_nodes_normally(action_name, - node_ids) - else: - res, reason = self._remove_nodes_with_hook( - action_name, node_ids, lifecycle_hook) - else: - if stop_node_before_delete: - # set update_parent_status to False so that a failure in stop - # operation is ignored and the parent status is not changed - res, reason = self._remove_nodes_normally( - consts.NODE_OPERATION, node_ids, - {'operation': 'stop', 'update_parent_status': False}) - if res != self.RES_OK: - LOG.warning('Failure while stopping nodes. ' - 'Proceed to delete nodes.') - res, reason = self._remove_nodes_normally(action_name, node_ids) - - if res == self.RES_OK: - self.outputs['nodes_removed'] = node_ids - for node_id in node_ids: - self.entity.remove_node(node_id) - else: - reason = 'Failed in deleting nodes: %s' % reason - - return res, reason - - @profiler.trace('ClusterAction.do_delete', hide_args=False) - def do_delete(self): - """Handler for the CLUSTER_DELETE action. 
- - :returns: A tuple containing the result and the corresponding reason. - """ - # Detach policies before delete - policies = cp_obj.ClusterPolicy.get_all(self.context, self.entity.id) - for policy in policies: - res, reason = self.entity.detach_policy(self.context, - policy.policy_id) - if res: - self.entity.store(self.context) - else: - return self.RES_ERROR, ("Unable to detach policy {} before " - "deletion.".format(policy.id)) - # Delete receivers - receivers = receiver_obj.Receiver.get_all( - self.context, filters={'cluster_id': self.entity.id}) - for receiver in receivers: - receiver_obj.Receiver.delete(self.context, receiver.id) - - reason = 'Deletion in progress.' - self.entity.set_status(self.context, consts.CS_DELETING, reason) - node_ids = [node.id for node in self.entity.nodes] - - # For cluster delete, we delete the nodes - data = { - 'deletion': { - 'destroy_after_deletion': True - } - } - self.data.update(data) - - result, reason = self._delete_nodes(node_ids) - if result != self.RES_OK: - self.entity.eval_status(self.context, consts.CLUSTER_DELETE) - return result, reason - - res = self.entity.do_delete(self.context) - if not res: - self.entity.eval_status(self.context, consts.CLUSTER_DELETE) - return self.RES_ERROR, 'Cannot delete cluster object.' - - return self.RES_OK, reason - - @profiler.trace('ClusterAction.do_add_nodes', hide_args=False) - def do_add_nodes(self): - """Handler for the CLUSTER_ADD_NODES action. - - TODO(anyone): handle placement data - - :returns: A tuple containing the result and the corresponding reason. - """ - node_ids = self.inputs.get('nodes') - errors = [] - nodes = [] - for nid in node_ids: - node = no.Node.get(self.context, nid) - if not node: - errors.append('Node %s is not found.' % nid) - continue - - if node.cluster_id: - errors.append('Node %(n)s is already owned by cluster %(c)s.' - '' % {'n': nid, 'c': node.cluster_id}) - continue - - if node.status != consts.NS_ACTIVE: - errors.append('Node %s is not in ACTIVE status.' % nid) - continue - - nodes.append(node) - - if len(errors) > 0: - return self.RES_ERROR, '\n'.join(errors) - - reason = 'Completed adding nodes.' 
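[Editorial note: the size check that follows relies on scaleutils.check_size_params, which returns an error string on violation and a falsy value when the change is acceptable. A simplified standalone stand-in, with assumed semantics and names:]

# Simplified stand-in for the size-constraint check; not Senlin's actual
# scaleutils.check_size_params signature.
def check_size(desired, min_size=0, max_size=-1):
    """Return an error string when desired violates the size window."""
    if desired < min_size:
        return 'The target capacity (%s) is less than the min_size (%s).' % (
            desired, min_size)
    if 0 <= max_size < desired:
        return 'The target capacity (%s) is greater than the max_size ' \
               '(%s).' % (desired, max_size)
    return None  # no error: the resize is acceptable

assert check_size(5, min_size=1, max_size=10) is None
assert check_size(11, min_size=1, max_size=10) is not None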
- # check the size constraint - current = no.Node.count_by_cluster(self.context, self.target) - desired = current + len(node_ids) - res = scaleutils.check_size_params(self.entity, desired, None, - None, True) - if res: - return self.RES_ERROR, res - - child = [] - for node in nodes: - nid = node.id - kwargs = { - 'name': 'node_join_%s' % nid[:8], - 'cluster_id': self.entity.id, - 'cause': consts.CAUSE_DERIVED, - 'inputs': {'cluster_id': self.target}, - } - action_id = base.Action.create(self.context, nid, consts.NODE_JOIN, - **kwargs) - child.append(action_id) - - if child: - dobj.Dependency.create(self.context, [c for c in child], self.id) - for cid in child: - ao.Action.update(self.context, cid, - {'status': base.Action.READY}) - dispatcher.start_action() - - # Wait for dependent action if any - result, new_reason = self._wait_for_dependents() - if result != self.RES_OK: - reason = new_reason - else: - self.entity.eval_status(self.context, consts.CLUSTER_ADD_NODES, - desired_capacity=desired) - self.outputs['nodes_added'] = node_ids - creation = self.data.get('creation', {}) - creation['nodes'] = node_ids - self.data['creation'] = creation - for node in nodes: - obj = node_mod.Node.load(self.context, db_node=node) - self.entity.add_node(obj) - - return result, reason - - @profiler.trace('ClusterAction.do_del_nodes', hide_args=False) - def do_del_nodes(self): - """Handler for the CLUSTER_DEL_NODES action. - - :returns: A tuple containing the result and the corresponding reason. - """ - # Use policy decision if any, or fall back to defaults - destroy_after_deletion = self.inputs.get('destroy_after_deletion', - False) - grace_period = 0 - reduce_desired_capacity = True - pd = self.data.get('deletion', None) - if pd is not None: - destroy_after_deletion = pd.get('destroy_after_deletion', False) - grace_period = pd.get('grace_period', 0) - reduce_desired_capacity = pd.get('reduce_desired_capacity', True) - - data = { - 'deletion': { - 'destroy_after_deletion': destroy_after_deletion, - 'grace_period': grace_period, - 'reduce_desired_capacity': reduce_desired_capacity, - } - } - self.data.update(data) - nodes = self.inputs.get('candidates', []) - - node_ids = copy.deepcopy(nodes) - errors = [] - for node_id in node_ids: - node = no.Node.get(self.context, node_id) - - # The return value is None if node not found - if not node: - errors.append(node_id) - continue - - if ((not node.cluster_id) or (node.cluster_id != self.target)): - nodes.remove(node_id) - - if len(errors) > 0: - msg = "Nodes not found: %s." % errors - return self.RES_ERROR, msg - - reason = 'Completed deleting nodes.' - if len(nodes) == 0: - return self.RES_OK, reason - - # check the size constraint - current = no.Node.count_by_cluster(self.context, self.target) - desired = current - len(nodes) - res = scaleutils.check_size_params(self.entity, desired, None, - None, True) - if res: - return self.RES_ERROR, res - - # sleep period - self._sleep(grace_period) - result, new_reason = self._delete_nodes(nodes) - - params = {} - if result != self.RES_OK: - reason = new_reason - if reduce_desired_capacity: - params['desired_capacity'] = desired - - self.entity.eval_status(self.context, - consts.CLUSTER_DEL_NODES, **params) - - return result, reason - - @profiler.trace('ClusterAction.do_replace_nodes', hide_args=False) - def do_replace_nodes(self): - """Handler for the CLUSTER_REPLACE_NODES action. - - :returns: A tuple containing the result and the corresponding reason. 
- """ - node_dict = self.inputs.get('candidates') - if not node_dict: - return ( - self.RES_ERROR, - 'Candidates must be a non-empty dict.' - ' Instead got {}'.format(node_dict)) - - errors = [] - original_nodes = [] - replacement_nodes = [] - for (original, replacement) in node_dict.items(): - original_node = no.Node.get(self.context, original) - replacement_node = no.Node.get(self.context, replacement) - - # The return value is None if node not found - if not original_node: - errors.append('Original node %s not found.' % original) - continue - if not replacement_node: - errors.append('Replacement node %s not found.' % replacement) - continue - if original_node.cluster_id != self.target: - errors.append('Node %(o)s is not a member of the ' - 'cluster %(c)s.' % {'o': original, - 'c': self.target}) - continue - if replacement_node.cluster_id: - errors.append(('Node %(r)s is already owned by cluster %(c)s.' - ) % {'r': replacement, - 'c': replacement_node.cluster_id}) - continue - if replacement_node.status != consts.NS_ACTIVE: - errors.append('Node %s is not in ACTIVE status.' % replacement) - continue - original_nodes.append(original_node) - replacement_nodes.append(replacement_node) - - if len(errors) > 0: - return self.RES_ERROR, '\n'.join(errors) - - result = self.RES_OK - reason = 'Completed replacing nodes.' - - children = [] - for (original, replacement) in node_dict.items(): - kwargs = { - 'cluster_id': self.entity.id, - 'cause': consts.CAUSE_DERIVED, - } - - # node_leave action - kwargs['name'] = 'node_leave_%s' % original[:8] - leave_action_id = base.Action.create(self.context, original, - consts.NODE_LEAVE, **kwargs) - # node_join action - kwargs['name'] = 'node_join_%s' % replacement[:8] - kwargs['inputs'] = {'cluster_id': self.target} - join_action_id = base.Action.create(self.context, replacement, - consts.NODE_JOIN, **kwargs) - - children.append((join_action_id, leave_action_id)) - - if children: - dobj.Dependency.create(self.context, [c[0] for c in children], - self.id) - for child in children: - join_id = child[0] - leave_id = child[1] - ao.Action.update(self.context, join_id, - {'status': base.Action.READY}) - - dobj.Dependency.create(self.context, [join_id], leave_id) - ao.Action.update(self.context, leave_id, - {'status': base.Action.READY}) - - dispatcher.start_action() - - result, new_reason = self._wait_for_dependents() - if result != self.RES_OK: - reason = new_reason - else: - for n in range(len(original_nodes)): - self.entity.remove_node(original_nodes[n]) - self.entity.add_node(replacement_nodes[n]) - - self.entity.eval_status(self.context, consts.CLUSTER_REPLACE_NODES) - return result, reason - - @profiler.trace('ClusterAction.do_check', hide_args=False) - def do_check(self): - """Handler for CLUSTER_CHECK action. - - :returns: A tuple containing the result and the corresponding reason. - """ - self.entity.do_check(self.context) - - child = [] - res = self.RES_OK - reason = 'Cluster checking completed.' 
- for node in self.entity.nodes: - node_id = node.id - need_delete = self.inputs.get('delete_check_action', False) - # delete some records of NODE_CHECK - if need_delete: - ao.Action.delete_by_target( - self.context, node_id, action=[consts.NODE_CHECK], - status=[consts.ACTION_SUCCEEDED, consts.ACTION_FAILED]) - - name = 'node_check_%s' % node_id[:8] - action_id = base.Action.create( - self.context, node_id, consts.NODE_CHECK, name=name, - cause=consts.CAUSE_DERIVED, - inputs=self.inputs - ) - - child.append(action_id) - - if child: - dobj.Dependency.create(self.context, [c for c in child], self.id) - for cid in child: - ao.Action.update(self.context, cid, - {'status': base.Action.READY}) - dispatcher.start_action() - - # Wait for dependent action if any - res, new_reason = self._wait_for_dependents() - if res != self.RES_OK: - reason = new_reason - - self.entity.eval_status(self.context, consts.CLUSTER_CHECK) - return res, reason - - def _check_capacity(self): - cluster = self.entity - - current = len(cluster.nodes) - desired = cluster.desired_capacity - - if current < desired: - count = desired - current - self._create_nodes(count) - - if current > desired: - count = current - desired - nodes = no.Node.get_all_by_cluster(self.context, cluster.id) - candidates = scaleutils.nodes_by_random(nodes, count) - self._delete_nodes(candidates) - - @profiler.trace('ClusterAction.do_recover', hide_args=False) - def do_recover(self): - """Handler for the CLUSTER_RECOVER action. - - :returns: A tuple containing the result and the corresponding reason. - """ - self.entity.do_recover(self.context) - - inputs = {} - - check = self.inputs.get('check', False) - inputs['operation'] = self.inputs.get('operation', None) - inputs['operation_params'] = self.inputs.get('operation_params', None) - - children = [] - for node in self.entity.nodes: - node_id = node.id - if check: - node = node_mod.Node.load(self.context, node_id=node_id) - node.do_check(self.context) - - if node.status == consts.NS_ACTIVE: - continue - action_id = base.Action.create( - self.context, node_id, consts.NODE_RECOVER, - name='node_recover_%s' % node_id[:8], - cause=consts.CAUSE_DERIVED, inputs=inputs, - ) - children.append(action_id) - - res = self.RES_OK - reason = 'Cluster recovery succeeded.' - if children: - dobj.Dependency.create(self.context, [c for c in children], - self.id) - for cid in children: - ao.Action.update(self.context, cid, - {'status': consts.ACTION_READY}) - dispatcher.start_action() - - # Wait for dependent action if any - res, new_reason = self._wait_for_dependents() - if res != self.RES_OK: - reason = new_reason - - check_capacity = self.inputs.get('check_capacity', False) - if check_capacity is True: - self._check_capacity() - - self.entity.eval_status(self.context, consts.CLUSTER_RECOVER) - return res, reason - - def _update_cluster_size(self, desired): - """Private function for updating cluster properties.""" - kwargs = {'desired_capacity': desired} - min_size = self.inputs.get(consts.ADJUSTMENT_MIN_SIZE, None) - max_size = self.inputs.get(consts.ADJUSTMENT_MAX_SIZE, None) - if min_size is not None: - kwargs['min_size'] = min_size - if max_size is not None: - kwargs['max_size'] = max_size - self.entity.set_status(self.context, consts.CS_RESIZING, - 'Cluster resize started.', **kwargs) - - @profiler.trace('ClusterAction.do_resize', hide_args=False) - def do_resize(self): - """Handler for the CLUSTER_RESIZE action. - - :returns: A tuple containing the result and the corresponding reason. 
- """ - # if no policy decision(s) found, use policy inputs directly, - # Note the 'parse_resize_params' function is capable of calculating - # desired capacity and handling best effort scaling. It also verifies - # that the inputs are valid - curr_capacity = no.Node.count_by_cluster(self.context, self.entity.id) - if 'creation' not in self.data and 'deletion' not in self.data: - result, reason = scaleutils.parse_resize_params(self, self.entity, - curr_capacity) - if result != self.RES_OK: - return result, reason - - # action input consolidated to action data now - reason = 'Cluster resize succeeded.' - if 'deletion' in self.data: - count = self.data['deletion']['count'] - candidates = self.data['deletion'].get('candidates', []) - - # Choose victims randomly if not already picked - if not candidates: - node_list = self.entity.nodes - candidates = scaleutils.nodes_by_random(node_list, count) - - self._update_cluster_size(curr_capacity - count) - - grace_period = self.data['deletion'].get('grace_period', 0) - self._sleep(grace_period) - result, new_reason = self._delete_nodes(candidates) - else: - # 'creation' in self.data: - count = self.data['creation']['count'] - self._update_cluster_size(curr_capacity + count) - result, new_reason = self._create_nodes(count) - - if result != self.RES_OK: - reason = new_reason - - self.entity.eval_status(self.context, consts.CLUSTER_RESIZE) - return result, reason - - @profiler.trace('ClusterAction.do_scale_out', hide_args=False) - def do_scale_out(self): - """Handler for the CLUSTER_SCALE_OUT action. - - :returns: A tuple containing the result and the corresponding reason. - """ - # We use policy output if any, or else the count is - # set to 1 as default. - pd = self.data.get('creation', None) - if pd is not None: - count = pd.get('count', 1) - else: - # If no scaling policy is attached, use the input count directly - value = self.inputs.get('count', 1) - success, count = utils.get_positive_int(value) - if not success: - reason = 'Invalid count (%s) for scaling out.' % value - return self.RES_ERROR, reason - - # check provided params against current properties - # desired is checked when strict is True - curr_size = no.Node.count_by_cluster(self.context, self.target) - new_size = curr_size + count - result = scaleutils.check_size_params(self.entity, new_size, - None, None, True) - if result: - return self.RES_ERROR, result - - self.entity.set_status(self.context, consts.CS_RESIZING, - 'Cluster scale out started.', - desired_capacity=new_size) - - result, reason = self._create_nodes(count) - if result == self.RES_OK: - reason = 'Cluster scaling succeeded.' - self.entity.eval_status(self.context, consts.CLUSTER_SCALE_OUT) - - return result, reason - - @profiler.trace('ClusterAction.do_scale_in', hide_args=False) - def do_scale_in(self): - """Handler for the CLUSTER_SCALE_IN action. - - :returns: A tuple containing the result and the corresponding reason. - """ - # We use policy data if any, deletion policy and scaling policy might - # be attached. - pd = self.data.get('deletion', None) - grace_period = 0 - if pd: - grace_period = pd.get('grace_period', 0) - candidates = pd.get('candidates', []) - # if scaling policy is attached, get 'count' from action data - count = len(candidates) or pd['count'] - else: - # If no scaling policy is attached, use the input count directly - candidates = [] - value = self.inputs.get('count', 1) - success, count = utils.get_positive_int(value) - if not success: - reason = 'Invalid count (%s) for scaling in.' 
% value - return self.RES_ERROR, reason - - # check provided params against current properties - # desired is checked when strict is True - curr_size = no.Node.count_by_cluster(self.context, self.target) - if count > curr_size: - LOG.warning("Triming count (%(count)s) to current cluster size " - "(%(curr)s) for scaling in", - {'count': count, 'curr': curr_size}) - count = curr_size - new_size = curr_size - count - - result = scaleutils.check_size_params(self.entity, new_size, - None, None, True) - if result: - return self.RES_ERROR, result - - self.entity.set_status(self.context, consts.CS_RESIZING, - 'Cluster scale in started.', - desired_capacity=new_size) - - # Choose victims randomly - if len(candidates) == 0: - candidates = scaleutils.nodes_by_random(self.entity.nodes, count) - - # Sleep period - self._sleep(grace_period) - - result, reason = self._delete_nodes(candidates) - - if result == self.RES_OK: - reason = 'Cluster scaling succeeded.' - - self.entity.eval_status(self.context, consts.CLUSTER_SCALE_IN) - - return result, reason - - @profiler.trace('ClusterAction.do_attach_policy', hide_args=False) - def do_attach_policy(self): - """Handler for the CLUSTER_ATTACH_POLICY action. - - :returns: A tuple containing the result and the corresponding reason. - """ - inputs = dict(self.inputs) - policy_id = inputs.pop('policy_id', None) - if not policy_id: - return self.RES_ERROR, 'Policy not specified.' - - res, reason = self.entity.attach_policy(self.context, policy_id, - inputs) - result = self.RES_OK if res else self.RES_ERROR - - # Store cluster since its data could have been updated - if result == self.RES_OK: - self.entity.store(self.context) - - return result, reason - - @profiler.trace('ClusterAction.do_detach_policy', hide_args=False) - def do_detach_policy(self): - """Handler for the CLUSTER_DETACH_POLICY action. - - :returns: A tuple containing the result and the corresponding reason. - """ - policy_id = self.inputs.get('policy_id', None) - if not policy_id: - return self.RES_ERROR, 'Policy not specified.' - - res, reason = self.entity.detach_policy(self.context, policy_id) - result = self.RES_OK if res else self.RES_ERROR - - # Store cluster since its data could have been updated - if result == self.RES_OK: - self.entity.store(self.context) - - return result, reason - - @profiler.trace('ClusterAction.do_update_policy', hide_args=False) - def do_update_policy(self): - """Handler for the CLUSTER_UPDATE_POLICY action. - - :returns: A tuple containing the result and the corresponding reason. - """ - policy_id = self.inputs.pop('policy_id', None) - if not policy_id: - return self.RES_ERROR, 'Policy not specified.' - res, reason = self.entity.update_policy(self.context, policy_id, - **self.inputs) - result = self.RES_OK if res else self.RES_ERROR - return result, reason - - @profiler.trace('ClusterAction.do_operation', hide_args=False) - def do_operation(self): - """Handler for CLUSTER_OPERATION action. - - Note that the inputs for the action should contain the following items: - - * ``nodes``: The nodes to operate on; - * ``operation``: The operation to be performed; - * ``params``: The parameters corresponding to the operation. - - :returns: A tuple containing the result and the corresponding reason. - """ - inputs = copy.deepcopy(self.inputs) - operation = inputs['operation'] - self.entity.do_operation(self.context, operation=operation) - - child = [] - res = self.RES_OK - reason = "Cluster operation '%s' completed." 
% operation - nodes = inputs.pop('nodes') - for node_id in nodes: - action_id = base.Action.create( - self.context, node_id, consts.NODE_OPERATION, - name='node_%s_%s' % (operation, node_id[:8]), - cause=consts.CAUSE_DERIVED, - inputs=inputs, - ) - child.append(action_id) - - if child: - dobj.Dependency.create(self.context, [c for c in child], self.id) - for cid in child: - ao.Action.update(self.context, cid, - {'status': base.Action.READY}) - dispatcher.start_action() - - # Wait for dependent action if any - res, new_reason = self._wait_for_dependents() - if res != self.RES_OK: - reason = new_reason - - self.entity.eval_status(self.context, operation) - return res, reason - - def _execute(self, **kwargs): - """Private method for action execution. - - This function search for the handler based on the action name for - execution and it wraps the action execution with policy checks. - - :returns: A tuple containing the result and the corresponding reason. - """ - # do pre-action policy checking - self.policy_check(self.entity.id, 'BEFORE') - if self.data['status'] != policy_mod.CHECK_OK: - reason = 'Policy check failure: %s' % self.data['reason'] - return self.RES_ERROR, reason - - result = self.RES_OK - action_name = self.action.lower() - method_name = action_name.replace('cluster', 'do') - method = getattr(self, method_name, None) - if method is None: - reason = 'Unsupported action: %s.' % self.action - return self.RES_ERROR, reason - - result, reason = method() - - # do post-action policy checking - self.inputs['action_result'] = result - self.policy_check(self.entity.id, 'AFTER') - if self.data['status'] != policy_mod.CHECK_OK: - reason = 'Policy check failure: %s' % self.data['reason'] - return self.RES_ERROR, reason - - return result, reason - - def execute(self, **kwargs): - """Wrapper of action execution. - - This is mainly a wrapper that executes an action with cluster lock - acquired. - - :returns: A tuple (res, reason) that indicates whether the execution - was a success and why if it wasn't a success. - """ - # Try to lock cluster before do real operation - forced = (self.action == consts.CLUSTER_DELETE) - res = senlin_lock.cluster_lock_acquire(self.context, self.target, - self.id, self.owner, - senlin_lock.CLUSTER_SCOPE, - forced) - # Failed to acquire lock, return RES_RETRY - if not res: - return self.RES_RETRY, 'Failed in locking cluster.' - - try: - # Refresh entity state to avoid stale data in action. 
- self.entity = cluster_mod.Cluster.load(self.context, self.target) - res, reason = self._execute(**kwargs) - finally: - senlin_lock.cluster_lock_release(self.target, self.id, - senlin_lock.CLUSTER_SCOPE) - - return res, reason - - def cancel(self): - """Handler to cancel the execution of action.""" - return self.RES_OK - - def release_lock(self): - """Handler to release the lock.""" - senlin_lock.cluster_lock_release(self.target, self.id, - senlin_lock.CLUSTER_SCOPE) - return self.RES_OK - - -def CompleteLifecycleProc(context, action_id): - """Complete lifecycle process.""" - - action = base.Action.load(context, action_id=action_id, project_safe=False) - if action is None: - LOG.error("Action %s could not be found.", action_id) - raise exception.ResourceNotFound(type='action', id=action_id) - - if action.get_status() == consts.ACTION_WAITING_LIFECYCLE_COMPLETION: - # update action status and reset owner back to None - # so that the action will get picked up by dispatcher - ao.Action.update(context, action_id, - {'status': consts.ACTION_READY, - 'status_reason': 'Lifecycle complete.', - 'owner': None}) - dispatcher.start_action() - else: - LOG.debug('Action %s status is not WAITING_LIFECYCLE. ' - 'Skip CompleteLifecycleProc', action_id) - return False - - return True diff --git a/senlin/engine/actions/custom_action.py b/senlin/engine/actions/custom_action.py deleted file mode 100644 index 5842c32c1..000000000 --- a/senlin/engine/actions/custom_action.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from senlin.engine.actions import base - - -class CustomAction(base.Action): - ACTIONS = ( - ACTION_EXECUTE, - ) = ( - 'ACTION_EXECUTE', - ) - - def execute(self, **kwargs): - return self.RES_OK, '' diff --git a/senlin/engine/actions/node_action.py b/senlin/engine/actions/node_action.py deleted file mode 100644 index f4463f1b9..000000000 --- a/senlin/engine/actions/node_action.py +++ /dev/null @@ -1,297 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
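[Editorial note: CompleteLifecycleProc above performs a single guarded state transition: only an action in WAITING_LIFECYCLE_COMPLETION flips to READY with its owner cleared, so a dispatcher can pick it up again. A minimal sketch of that transition; the dict-based store is an assumed stand-in for the actions table:]

# Illustrative only; 'store' stands in for the real actions table.
store = {'a1': {'status': 'WAITING_LIFECYCLE_COMPLETION', 'owner': 'engine-1'}}

def complete_lifecycle(action_id):
    record = store.get(action_id)
    if record is None:
        raise LookupError('action %s not found' % action_id)
    if record['status'] != 'WAITING_LIFECYCLE_COMPLETION':
        return False  # nothing to do; not waiting on a hook
    record.update(status='READY', owner=None)
    return True

print(complete_lifecycle('a1'))  # True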
-
-import eventlet
-
-from oslo_log import log as logging
-from osprofiler import profiler
-
-from senlin.common import consts
-from senlin.common import scaleutils as su
-from senlin.engine.actions import base
-from senlin.engine import cluster as cm
-from senlin.engine import event as EVENT
-from senlin.engine import node as node_mod
-from senlin.engine import senlin_lock
-from senlin.objects import node as no
-from senlin.policies import base as pb
-
-LOG = logging.getLogger(__name__)
-
-
-class NodeAction(base.Action):
-    """An action that can be performed on a cluster member (node)."""
-
-    def __init__(self, target, action, context, **kwargs):
-        """Constructor for a node action object.
-
-        :param target: ID of the target node object on which the action is to
-                       be executed.
-        :param action: The name of the action to be executed.
-        :param context: The context used for accessing the DB layer.
-        :param dict kwargs: Additional parameters that can be passed to the
-                            action.
-        """
-        super(NodeAction, self).__init__(target, action, context, **kwargs)
-
-        try:
-            self.entity = node_mod.Node.load(self.context, node_id=self.target)
-        except Exception:
-            self.entity = None
-
-    @profiler.trace('NodeAction.do_create', hide_args=False)
-    def do_create(self):
-        """Handler for the NODE_CREATE action.
-
-        :returns: A tuple containing the result and the corresponding reason.
-        """
-        cluster_id = self.entity.cluster_id
-        if cluster_id and self.cause == consts.CAUSE_RPC:
-            # Check the cluster size constraint if a target cluster is
-            # specified
-            cluster = cm.Cluster.load(self.context, cluster_id)
-            desired = no.Node.count_by_cluster(self.context, cluster_id)
-            result = su.check_size_params(cluster, desired, None, None, True)
-            if result:
-                # cannot place the node into the cluster
-                no.Node.update(self.context, self.entity.id,
-                               {'cluster_id': '', 'status': consts.NS_ERROR})
-                return self.RES_ERROR, result
-
-        res, reason = self.entity.do_create(self.context)
-
-        if cluster_id and self.cause == consts.CAUSE_RPC:
-            # Update the cluster's desired_capacity and re-evaluate its
-            # status whether or not the creation succeeded, because the node
-            # object is already treated as a member of the cluster and the
-            # node creation may have changed the cluster's status
-            cluster.eval_status(self.context, consts.NODE_CREATE,
-                                desired_capacity=desired)
-        if res:
-            return self.RES_OK, 'Node created successfully.'
-        else:
-            return self.RES_ERROR, reason
-
-    @profiler.trace('NodeAction.do_delete', hide_args=False)
-    def do_delete(self):
-        """Handler for the NODE_DELETE action.
-
-        :returns: A tuple containing the result and the corresponding reason.
- """ - cluster_id = self.entity.cluster_id - if cluster_id and self.cause == consts.CAUSE_RPC: - # If node belongs to a cluster, check size constraint - # before deleting it - cluster = cm.Cluster.load(self.context, cluster_id) - current = no.Node.count_by_cluster(self.context, cluster_id) - desired = current - 1 - result = su.check_size_params(cluster, desired, None, None, True) - if result: - return self.RES_ERROR, result - - # handle grace_period - pd = self.data.get('deletion', None) - if pd: - grace_period = pd.get('grace_period', 0) - if grace_period: - eventlet.sleep(grace_period) - - res = self.entity.do_delete(self.context) - - if cluster_id and self.cause == consts.CAUSE_RPC: - # check if desired_capacity should be changed - do_reduce = True - params = {} - pd = self.data.get('deletion', None) - if pd: - do_reduce = pd.get('reduce_desired_capacity', True) - if do_reduce and res: - params = {'desired_capacity': desired} - cluster.eval_status(self.context, consts.NODE_DELETE, **params) - - if not res: - return self.RES_ERROR, 'Node deletion failed.' - - return self.RES_OK, 'Node deleted successfully.' - - @profiler.trace('NodeAction.do_update', hide_args=False) - def do_update(self): - """Handler for the NODE_UPDATE action. - - :returns: A tuple containing the result and the corresponding reason. - """ - params = self.inputs - new_profile_id = params.get('new_profile_id', None) - if new_profile_id and new_profile_id == self.entity.profile_id: - params.pop('new_profile_id') - - if not params: - return self.RES_OK, 'No property to update.' - - res = self.entity.do_update(self.context, params) - if res: - return self.RES_OK, 'Node updated successfully.' - else: - return self.RES_ERROR, 'Node update failed.' - - @profiler.trace('NodeAction.do_join', hide_args=False) - def do_join(self): - """Handler for the NODE_JOIN action. - - Note that we don't manipulate the cluster's status after this - operation. This is because a NODE_JOIN is always an internal action, - i.e. derived from a cluster action. The cluster's status is supposed - to be checked and set in the outer cluster action rather than here. - - :returns: A tuple containing the result and the corresponding reason. - """ - cluster_id = self.inputs.get('cluster_id') - result = self.entity.do_join(self.context, cluster_id) - if result: - return self.RES_OK, 'Node successfully joined cluster.' - else: - return self.RES_ERROR, 'Node failed in joining cluster.' - - @profiler.trace('NodeAction.do_leave', hide_args=False) - def do_leave(self): - """Handler for the NODE_LEAVE action. - - Note that we don't manipulate the cluster's status after this - operation. This is because a NODE_JOIN is always an internal action, - i.e. derived from a cluster action. The cluster's status is supposed - to be checked and set in the outer cluster action rather than here. - - :returns: A tuple containing the result and the corresponding reason. - """ - res = self.entity.do_leave(self.context) - if res: - return self.RES_OK, 'Node successfully left cluster.' - else: - return self.RES_ERROR, 'Node failed in leaving cluster.' - - @profiler.trace('NodeAction.do_check', hide_args=False) - def do_check(self): - """Handler for the NODE_check action. - - :returns: A tuple containing the result and the corresponding reason. - """ - res = self.entity.do_check(self.context) - if res: - return self.RES_OK, 'Node check succeeded.' - else: - return self.RES_ERROR, 'Node check failed.' 
- - @profiler.trace('NodeAction.do_recover', hide_args=False) - def do_recover(self): - """Handler for the NODE_RECOVER action. - - :returns: A tuple containing the result and the corresponding reason. - """ - res = self.entity.do_recover(self.context, self) - if res: - return self.RES_OK, 'Node recovered successfully.' - else: - return self.RES_ERROR, 'Node recover failed.' - - @profiler.trace('NodeAction.do_operation', hide_args=False) - def do_operation(self): - """Handler for the NODE_OPERATION action. - - :returns: A tuple containing the result and the corresponding reason. - """ - operation = self.inputs['operation'] - res = self.entity.do_operation(self.context, **self.inputs) - if res: - return self.RES_OK, "Node operation '%s' succeeded." % operation - else: - return self.RES_ERROR, "Node operation '%s' failed." % operation - - def _execute(self): - """Private function that finds out the handler and execute it.""" - - action_name = self.action.lower() - method_name = action_name.replace('node', 'do') - method = getattr(self, method_name, None) - - if method is None: - reason = 'Unsupported action: %s' % self.action - EVENT.error(self, consts.PHASE_ERROR, reason) - return self.RES_ERROR, reason - - return method() - - def execute(self, **kwargs): - """Interface function for action execution. - - :param dict kwargs: Parameters provided to the action, if any. - :returns: A tuple containing the result and the related reason. - """ - # Since node.cluster_id could be reset to '' during action execution, - # we record it here for policy check and cluster lock release. - forced = (self.action in [consts.NODE_DELETE, consts.NODE_OPERATION]) - saved_cluster_id = self.entity.cluster_id - if saved_cluster_id: - if self.cause == consts.CAUSE_RPC: - res = senlin_lock.cluster_lock_acquire( - self.context, self.entity.cluster_id, self.id, self.owner, - senlin_lock.NODE_SCOPE, False) - - if not res: - return self.RES_RETRY, 'Failed in locking cluster' - - try: - self.policy_check(self.entity.cluster_id, 'BEFORE') - finally: - if self.data['status'] != pb.CHECK_OK: - # Don't emit message since policy_check should have - # done it - senlin_lock.cluster_lock_release( - saved_cluster_id, self.id, senlin_lock.NODE_SCOPE) - return self.RES_ERROR, ('Policy check: ' + - self.data['reason']) - elif self.cause == consts.CAUSE_DERIVED_LCH: - self.policy_check(saved_cluster_id, 'BEFORE') - - try: - res = senlin_lock.node_lock_acquire(self.context, self.entity.id, - self.id, self.owner, forced) - if not res: - res = self.RES_RETRY - reason = 'Failed in locking node' - else: - res, reason = self._execute() - if saved_cluster_id and self.cause == consts.CAUSE_RPC: - self.policy_check(saved_cluster_id, 'AFTER') - if self.data['status'] != pb.CHECK_OK: - res = self.RES_ERROR - reason = 'Policy check: ' + self.data['reason'] - finally: - senlin_lock.node_lock_release(self.entity.id, self.id) - if saved_cluster_id and self.cause == consts.CAUSE_RPC: - senlin_lock.cluster_lock_release(saved_cluster_id, self.id, - senlin_lock.NODE_SCOPE) - return res, reason - - def cancel(self): - """Handler for cancelling the action.""" - return self.RES_OK - - def release_lock(self): - """Handler to release the lock.""" - senlin_lock.node_lock_release(self.entity.id, self.id) - - # only release cluster lock if it was locked as part of this - # action (i.e. 
-        # the senlin API, and not a CAUSE_DERIVED one)
-        if self.cause == consts.CAUSE_RPC:
-            senlin_lock.cluster_lock_release(self.entity.cluster_id, self.id,
-                                             senlin_lock.NODE_SCOPE)
-        return self.RES_OK
diff --git a/senlin/engine/cluster.py b/senlin/engine/cluster.py
deleted file mode 100644
index e88f8aca4..000000000
--- a/senlin/engine/cluster.py
+++ /dev/null
@@ -1,575 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import timeutils
-
-from senlin.common import consts
-from senlin.common import exception
-from senlin.engine import cluster_policy as cpm
-from senlin.engine import health_manager
-from senlin.engine import node as node_mod
-from senlin.objects import cluster as co
-from senlin.objects import cluster_policy as cpo
-from senlin.objects import node as no
-from senlin.policies import base as pcb
-from senlin.profiles import base as pfb
-
-LOG = logging.getLogger(__name__)
-
-CONF = cfg.CONF
-
-
-class Cluster(object):
-    """A cluster is a collection of objects of the same profile type.
-
-    All operations are performed without further checking because the
-    checks are supposed to be done before/after/during an action is
-    executed.
-    """
-
-    def __init__(self, name, desired_capacity, profile_id,
-                 context=None, **kwargs):
-        """Initialize a cluster object.
-
-        The cluster defaults to having 0 nodes with no profile assigned.
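Constructing a Cluster only builds the in-memory representation; nothing is persisted until store() is called. An illustrative construction, assuming the removed module were importable and ctx were a request context (the profile UUID is hypothetical):

    cluster = Cluster('web-cluster', 2, 'fake-profile-uuid',
                      min_size=1, max_size=5)
    cluster_id = cluster.store(ctx)  # persists the record, loads runtime data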
- """ - - self.id = kwargs.get('id', None) - self.name = name - self.profile_id = profile_id - - # Initialize the fields using kwargs passed in - self.user = kwargs.get('user', '') - self.project = kwargs.get('project', '') - self.domain = kwargs.get('domain', '') - - self.init_at = kwargs.get('init_at', None) - self.created_at = kwargs.get('created_at', None) - self.updated_at = kwargs.get('updated_at', None) - - self.min_size = (kwargs.get('min_size') or - consts.CLUSTER_DEFAULT_MIN_SIZE) - self.max_size = (kwargs.get('max_size') or - consts.CLUSTER_DEFAULT_MAX_SIZE) - self.desired_capacity = desired_capacity - self.next_index = kwargs.get('next_index', 1) - self.timeout = (kwargs.get('timeout') or - cfg.CONF.default_action_timeout) - - self.status = kwargs.get('status', consts.CS_INIT) - self.status_reason = kwargs.get('status_reason', 'Initializing') - self.data = kwargs.get('data', {}) - self.metadata = kwargs.get('metadata') or {} - self.dependents = kwargs.get('dependents') or {} - self.config = kwargs.get('config') or {} - - # rt is a dict for runtime data - self.rt = { - 'profile': None, - 'nodes': [], - 'policies': [] - } - - if context is not None: - self._load_runtime_data(context) - - def _load_runtime_data(self, context): - if self.id is None: - return - - policies = [] - bindings = cpo.ClusterPolicy.get_all(context, self.id) - for b in bindings: - # Detect policy type conflicts - policy = pcb.Policy.load(context, b.policy_id, project_safe=False) - policies.append(policy) - - self.rt = { - 'profile': pfb.Profile.load(context, - profile_id=self.profile_id, - project_safe=False), - 'nodes': no.Node.get_all_by_cluster(context, self.id), - 'policies': policies - } - - def store(self, context): - """Store the cluster in database and return its ID. - - If the ID already exists, we do an update. - """ - - values = { - 'name': self.name, - 'profile_id': self.profile_id, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'init_at': self.init_at, - 'created_at': self.created_at, - 'updated_at': self.updated_at, - 'min_size': self.min_size, - 'max_size': self.max_size, - 'desired_capacity': self.desired_capacity, - 'next_index': self.next_index, - 'timeout': self.timeout, - 'status': self.status, - 'status_reason': self.status_reason, - 'meta_data': self.metadata, - 'data': self.data, - 'dependents': self.dependents, - 'config': self.config, - } - - timestamp = timeutils.utcnow(True) - if self.id: - values['updated_at'] = timestamp - co.Cluster.update(context, self.id, values) - else: - self.init_at = timestamp - values['init_at'] = timestamp - cluster = co.Cluster.create(context, values) - self.id = cluster.id - - self._load_runtime_data(context) - return self.id - - @classmethod - def _from_object(cls, context, obj): - """Construct a cluster from database object. 
- - :param context: the context used for DB operations; - :param obj: a DB cluster object that will receive all fields; - """ - kwargs = { - 'id': obj.id, - 'user': obj.user, - 'project': obj.project, - 'domain': obj.domain, - 'init_at': obj.init_at, - 'created_at': obj.created_at, - 'updated_at': obj.updated_at, - 'min_size': obj.min_size, - 'max_size': obj.max_size, - 'next_index': obj.next_index, - 'timeout': obj.timeout, - 'status': obj.status, - 'status_reason': obj.status_reason, - 'data': obj.data, - 'metadata': obj.metadata, - 'dependents': obj.dependents, - 'config': obj.config, - } - - return cls(obj.name, obj.desired_capacity, obj.profile_id, - context=context, **kwargs) - - @classmethod - def load(cls, context, cluster_id=None, dbcluster=None, project_safe=True): - """Retrieve a cluster from database.""" - if dbcluster is None: - dbcluster = co.Cluster.get(context, cluster_id, - project_safe=project_safe) - if dbcluster is None: - raise exception.ResourceNotFound(type='cluster', id=cluster_id) - - return cls._from_object(context, dbcluster) - - @classmethod - def load_all(cls, context, limit=None, marker=None, sort=None, - filters=None, project_safe=True): - """Retrieve all clusters from database.""" - - objs = co.Cluster.get_all(context, limit=limit, marker=marker, - sort=sort, filters=filters, - project_safe=project_safe) - - for obj in objs: - cluster = cls._from_object(context, obj) - yield cluster - - def set_status(self, context, status, reason=None, **kwargs): - """Set status of the cluster. - - :param context: A DB session for accessing the backend database. - :param status: A string providing the new status of the cluster. - :param reason: A string containing the reason for the status change. - It can be omitted when invoking this method. - :param dict kwargs: Other optional attributes to be updated. - :returns: Nothing. - """ - - values = {} - now = timeutils.utcnow(True) - if status == consts.CS_ACTIVE and self.status == consts.CS_CREATING: - self.created_at = now - values['created_at'] = now - elif (status == consts.CS_ACTIVE and - self.status in (consts.CS_UPDATING, consts.CS_RESIZING)): - self.updated_at = now - values['updated_at'] = now - - self.status = status - values['status'] = status - if reason: - self.status_reason = reason - values['status_reason'] = reason - - for k, v in kwargs.items(): - if hasattr(self, k): - setattr(self, k, v) - values[k] = v - - # There is a possibility that the profile id is changed - if 'profile_id' in values: - profile = pfb.Profile.load(context, profile_id=self.profile_id) - self.rt['profile'] = profile - co.Cluster.update(context, self.id, values) - return - - def do_create(self, context, **kwargs): - """Additional logic at the beginning of cluster creation process. - - Set cluster status to CREATING. 
- """ - if self.status != consts.CS_INIT: - LOG.error('Cluster is in status "%s"', self.status) - return False - - self.set_status(context, consts.CS_CREATING, 'Creation in progress') - try: - pfb.Profile.create_cluster_object(context, self) - except exception.EResourceCreation as ex: - self.set_status(context, consts.CS_ERROR, str(ex)) - return False - - return True - - def do_delete(self, context, **kwargs): - """Additional logic at the end of cluster deletion process.""" - self.set_status(context, consts.CS_DELETING, 'Deletion in progress') - - try: - pfb.Profile.delete_cluster_object(context, self) - except exception.EResourceDeletion as ex: - self.set_status(context, consts.CS_ERROR, str(ex)) - return False - - co.Cluster.delete(context, self.id) - return True - - def do_update(self, context, **kwargs): - """Additional logic at the beginning of cluster updating progress. - - This method is intended to be called only from an action. - """ - self.set_status(context, consts.CS_UPDATING, 'Update in progress') - return True - - def do_check(self, context, **kwargs): - """Additional logic at the beginning of cluster checking process. - - Set cluster status to CHECKING. - """ - self.set_status(context, consts.CS_CHECKING, 'Check in progress') - return True - - def do_recover(self, context, **kwargs): - """Additional logic at the beginning of cluster recovering process. - - Set cluster status to RECOVERING. - """ - self.set_status(context, consts.CS_RECOVERING, 'Recovery in progress') - return True - - def do_operation(self, context, **kwargs): - """Additional logic at the beginning of cluster recovering process. - - Set cluster status to OPERATING. - """ - operation = kwargs.get("operation", "unknown") - self.set_status(context, consts.CS_OPERATING, - "Operation %s in progress" % operation) - return True - - def attach_policy(self, ctx, policy_id, values): - """Attach policy object to the cluster. - - Note this method MUST be called with the cluster locked. - - :param ctx: A context for DB operation. - :param policy_id: ID of the policy object. - :param values: Optional dictionary containing binding properties. - - :returns: A tuple containing a boolean result and a reason string. - """ - - policy = pcb.Policy.load(ctx, policy_id) - # Check if policy has already been attached - for existing in self.rt['policies']: - # Policy already attached - if existing.id == policy_id: - return True, 'Policy already attached.' - - # Detect policy type conflicts - if (existing.type == policy.type) and policy.singleton: - reason = ("Only one instance of policy type (%(ptype)s) can " - "be attached to a cluster, but another instance " - "(%(existing)s) is found attached to the cluster " - "(%(cluster)s) already." - ) % {'ptype': policy.type, - 'existing': existing.id, - 'cluster': self.id} - return False, reason - - # invoke policy callback - enabled = bool(values.get('enabled', True)) - res, data = policy.attach(self, enabled=enabled) - if not res: - return False, data - - kwargs = { - 'enabled': enabled, - 'data': data, - 'priority': policy.PRIORITY - } - - cp = cpm.ClusterPolicy(self.id, policy_id, **kwargs) - cp.store(ctx) - - # refresh cached runtime - self.rt['policies'].append(policy) - - return True, 'Policy attached.' - - def update_policy(self, ctx, policy_id, **values): - """Update a policy that is already attached to a cluster. - - Note this method must be called with the cluster locked. - :param ctx: A context for DB operation. - :param policy_id: ID of the policy object. 
-        :param values: Optional dictionary containing new binding properties.
-
-        :returns: A tuple containing a boolean result and a string reason.
-        """
-        # Check if policy has already been attached
-        found = False
-        for existing in self.policies:
-            if existing.id == policy_id:
-                found = True
-                break
-        if not found:
-            return False, 'Policy not attached.'
-
-        enabled = values.get('enabled', None)
-        if enabled is None:
-            return True, 'No update is needed.'
-
-        params = {'enabled': bool(enabled)}
-        # disable health check if necessary
-        policy_type = existing.type.split('-')[0]
-        if policy_type == 'senlin.policy.health':
-            if enabled is True:
-                health_manager.enable(self.id)
-            else:
-                health_manager.disable(self.id)
-
-        cpo.ClusterPolicy.update(ctx, self.id, policy_id, params)
-        return True, 'Policy updated.'
-
-    def detach_policy(self, ctx, policy_id):
-        """Detach policy object from the cluster.
-
-        Note this method MUST be called with the cluster locked.
-
-        :param ctx: A context for DB operation.
-        :param policy_id: ID of the policy object.
-
-        :returns: A tuple containing a boolean result and a reason string.
-        """
-        # Check if policy has already been attached
-        found = None
-        for existing in self.policies:
-            if existing.id == policy_id:
-                found = existing
-                break
-        if found is None:
-            return False, 'Policy not attached.'
-
-        policy = pcb.Policy.load(ctx, policy_id)
-        res, reason = policy.detach(self)
-        if not res:
-            return res, reason
-
-        cpo.ClusterPolicy.delete(ctx, self.id, policy_id)
-        self.rt['policies'].remove(found)
-
-        return True, 'Policy detached.'
-
-    @property
-    def nodes(self):
-        return self.rt['nodes']
-
-    def add_node(self, node):
-        """Append specified node to the cluster cache.
-
-        :param node: The node to become a new member of the cluster.
-        """
-        self.rt['nodes'].append(node)
-
-    def remove_node(self, node_id):
-        """Remove node with specified ID from cache.
-
-        :param node_id: ID of the node to be removed from cache.
-        """
-        for node in self.rt['nodes']:
-            if node.id == node_id:
-                self.rt['nodes'].remove(node)
-
-    def update_node(self, nodes):
-        """Update cluster runtime data.
-
-        :param nodes: List of node objects.
-        """
-        self.rt['nodes'] = nodes
-
-    @property
-    def policies(self):
-        return self.rt['policies']
-
-    def get_region_distribution(self, regions):
-        """Get node distribution regarding the given regions.
-
-        :param regions: list of region names to check.
-        :return: a dict containing region and number as key-value pairs.
-        """
-        dist = dict.fromkeys(regions, 0)
-
-        for node in self.nodes:
-            placement = node.data.get('placement', {})
-            if placement:
-                region = placement.get('region_name', None)
-                if region and region in regions:
-                    dist[region] += 1
-
-        return dist
-
-    def get_zone_distribution(self, ctx, zones):
-        """Get node distribution regarding the given availability zones.
-
-        The availability zone information is only available for some profiles.
-
-        :param ctx: context used to access node details.
-        :param zones: list of zone names to check.
-        :returns: a dict containing zone and number as key-value pairs.
-        """
-        dist = dict.fromkeys(zones, 0)
-
-        for node in self.nodes:
-            placement = node.data.get('placement', {})
-            if placement and 'zone' in placement:
-                zone = placement['zone']
-                dist[zone] += 1
-            else:
-                details = node.get_details(ctx)
-                zname = details.get('OS-EXT-AZ:availability_zone', None)
-                if zname and zname in dist:
-                    dist[zname] += 1
-
-        return dist
-
-    def nodes_by_region(self, region):
-        """Get list of nodes that belong to the specified region.
-
-        :param region: Name of region for filtering.
-        :return: A list of nodes that are from the specified region.
-        """
-        result = []
-        for node in self.nodes:
-            placement = node.data.get('placement', {})
-            if placement and 'region_name' in placement:
-                if region == placement['region_name']:
-                    result.append(node)
-        return result
-
-    def nodes_by_zone(self, zone):
-        """Get list of nodes that reside in the specified availability zone.
-
-        :param zone: Name of availability zone for filtering.
-        :return: A list of nodes that reside in the specified AZ.
-        """
-        result = []
-        for node in self.nodes:
-            placement = node.data.get('placement', {})
-            if placement and 'zone' in placement:
-                if zone == placement['zone']:
-                    result.append(node)
-        return result
-
-    def health_check(self, ctx):
-        """Check the status of the cluster's physical resources.
-
-        :param ctx: The context used to operate on node objects.
-        """
-        # Note this procedure is a pure sequential operation,
-        # it's not suitable for large-scale clusters.
-
-        old_nodes = self.nodes
-        for node in old_nodes:
-            node.do_check(ctx)
-
-        nodes = node_mod.Node.load_all(ctx, cluster_id=self.id)
-        self.update_node([n for n in nodes])
-
-    def eval_status(self, ctx, operation, **params):
-        """Re-evaluate cluster's health status.
-
-        :param ctx: The requesting context.
-        :param operation: The operation that triggers this status evaluation.
-        :returns: ``None``.
-        """
-        nodes = node_mod.Node.load_all(ctx, cluster_id=self.id)
-        self.rt['nodes'] = [n for n in nodes]
-
-        active_count = 0
-        for node in self.nodes:
-            if node.status == consts.NS_ACTIVE:
-                active_count += 1
-
-        # get provided desired_capacity/min_size/max_size
-        desired = params.get('desired_capacity', self.desired_capacity)
-        min_size = params.get('min_size', self.min_size)
-        max_size = params.get('max_size', self.max_size)
-
-        values = params or {}
-        if active_count < min_size:
-            status = consts.CS_ERROR
-            reason = ("%(o)s: number of active nodes is below min_size "
-                      "(%(n)d).") % {'o': operation, 'n': min_size}
-        elif active_count < desired:
-            status = consts.CS_WARNING
-            reason = ("%(o)s: number of active nodes is below "
-                      "desired_capacity "
-                      "(%(n)d).") % {'o': operation, 'n': desired}
-        elif max_size < 0 or active_count <= max_size:
-            status = consts.CS_ACTIVE
-            reason = ("%(o)s: number of active nodes is equal to or above "
-                      "desired_capacity "
-                      "(%(n)d).") % {'o': operation, 'n': desired}
-        else:
-            status = consts.CS_WARNING
-            reason = ("%(o)s: number of active nodes is above max_size "
-                      "(%(n)d).") % {'o': operation, 'n': max_size}
-
-        values.update({'status': status, 'status_reason': reason})
-        co.Cluster.update(ctx, self.id, values)
diff --git a/senlin/engine/cluster_policy.py b/senlin/engine/cluster_policy.py
deleted file mode 100644
index f1f4803ab..000000000
--- a/senlin/engine/cluster_policy.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
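Before moving on to cluster_policy.py, the status thresholds that Cluster.eval_status() above applies can be restated compactly. A sketch under the same semantics (max_size < 0 means unlimited):

    def derive_status(active, desired, min_size, max_size):
        if active < min_size:
            return 'ERROR'    # below the hard floor
        if active < desired:
            return 'WARNING'  # fewer active nodes than desired_capacity
        if max_size < 0 or active <= max_size:
            return 'ACTIVE'   # at or above desired, within max_size
        return 'WARNING'      # above max_size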
- -from senlin.common import exception -from senlin.objects import cluster_policy as cpo - - -class ClusterPolicy(object): - """Object representing a binding between a cluster and a policy. - - This object also records the runtime data of a policy, if any. - """ - def __init__(self, cluster_id, policy_id, **kwargs): - self.id = kwargs.get('id', None) - - self.cluster_id = cluster_id - self.policy_id = policy_id - self.enabled = kwargs.get('enabled') - self.data = kwargs.get('data', {}) - self.priority = kwargs.get('priority') - self.last_op = kwargs.get('last_op', None) - - # derived data from binding, put here for convenience - self.cluster_name = kwargs.get('cluster_name', '') - self.policy_name = kwargs.get('policy_name', '') - self.policy_type = kwargs.get('policy_type', '') - - def store(self, context): - """Store the binding record into database table.""" - values = { - 'enabled': self.enabled, - 'data': self.data, - 'last_op': self.last_op, - 'priority': self.priority - } - - if self.id: - cpo.ClusterPolicy.update(context, self.cluster_id, self.policy_id, - values) - else: - binding = cpo.ClusterPolicy.create(context, self.cluster_id, - self.policy_id, values) - self.cluster_name = binding.cluster.name - self.policy_name = binding.policy.name - self.policy_type = binding.policy.type - self.id = binding.id - - return self.id - - @classmethod - def _from_object(cls, context, obj): - """Construct a cluster policy binding from database object. - - :param context: the context used for DB operations; - :param obj: a cluster-policy binding object that contains all fields; - """ - kwargs = { - 'id': obj.id, - 'enabled': obj.enabled, - 'data': obj.data, - 'last_op': obj.last_op, - 'priority': obj.priority, - - # derived data - 'cluster_name': obj.cluster.name, - 'policy_name': obj.policy.name, - 'policy_type': obj.policy.type, - } - - return cls(obj.cluster_id, obj.policy_id, context=context, **kwargs) - - @classmethod - def load(cls, context, cluster_id, policy_id): - """Retrieve a cluster-policy binding from database.""" - - binding = cpo.ClusterPolicy.get(context, cluster_id, policy_id) - if binding is None: - raise exception.PolicyNotAttached(policy=policy_id, - cluster=cluster_id) - - return cls._from_object(context, binding) - - def to_dict(self): - binding_dict = { - 'id': self.id, - 'cluster_id': self.cluster_id, - 'policy_id': self.policy_id, - 'enabled': self.enabled, - 'data': self.data, - 'last_op': self.last_op, - # below are derived data for user's convenience - 'cluster_name': self.cluster_name, - 'policy_name': self.policy_name, - 'policy_type': self.policy_type, - } - return binding_dict diff --git a/senlin/engine/dispatcher.py b/senlin/engine/dispatcher.py deleted file mode 100644 index 59cec1c6e..000000000 --- a/senlin/engine/dispatcher.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
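A ClusterPolicy binding like the one above was normally created through Cluster.attach_policy() rather than by hand, but a minimal direct usage sketch looks like this (UUIDs are hypothetical; ctx is a request context):

    binding = ClusterPolicy('fake-cluster-uuid', 'fake-policy-uuid',
                            enabled=True, priority=40)
    binding.store(ctx)        # no ID yet, so this creates the DB row
    print(binding.to_dict())  # includes the derived cluster/policy names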
-
-from oslo_config import cfg
-from oslo_context import context as oslo_context
-from oslo_log import log as logging
-import oslo_messaging
-
-from senlin.common import consts
-from senlin.common import messaging
-
-LOG = logging.getLogger(__name__)
-
-OPERATIONS = (
-    START_ACTION, CANCEL_ACTION, STOP
-) = (
-    'start_action', 'cancel_action', 'stop'
-)
-
-
-def notify(method, engine_id=None, **kwargs):
-    """Send notification to dispatcher.
-
-    Note that the dispatcher is an engine-internal communication channel. We
-    are not using versioned object serialization at this level.
-
-    :param method: remote method to call
-    :param engine_id: dispatcher to notify; None implies broadcast
-    """
-    client = messaging.get_rpc_client(consts.ENGINE_TOPIC, cfg.CONF.host)
-
-    if engine_id:
-        # Notify specific dispatcher identified by engine_id
-        call_context = client.prepare(server=engine_id)
-    else:
-        # Broadcast to all dispatchers
-        call_context = client.prepare(fanout=True)
-
-    try:
-        # We don't actually use the context parameter in action
-        # progress. But since RPCClient.call needs this param,
-        # we use the oslo current context here.
-        call_context.cast(oslo_context.get_current(), method, **kwargs)
-        return True
-    except oslo_messaging.MessagingTimeout:
-        return False
-
-
-def start_action(engine_id=None, **kwargs):
-    return notify(START_ACTION, engine_id, **kwargs)
diff --git a/senlin/engine/environment.py b/senlin/engine/environment.py
deleted file mode 100644
index 606db6faf..000000000
--- a/senlin/engine/environment.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import glob
-import os.path
-from stevedore import extension
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from senlin.common import exception
-from senlin.common.i18n import _
-from senlin.engine import parser
-from senlin.engine import registry
-
-LOG = logging.getLogger(__name__)
-
-_environment = None
-
-
-def global_env():
-    global _environment
-
-    if _environment is None:
-        initialize()
-    return _environment
-
-
-class Environment(object):
-    """An object that contains all profiles, policies and customizations."""
-
-    SECTIONS = (
-        PARAMETERS, CUSTOM_PROFILES, CUSTOM_POLICIES,
-    ) = (
-        'parameters', 'custom_profiles', 'custom_policies',
-    )
-
-    def __init__(self, env=None, is_global=False):
-        """Create an Environment from a dict.
-
-        :param env: the json environment
-        :param is_global: boolean indicating if this is the global environment.
- """ - self.params = {} - if is_global: - self.profile_registry = registry.Registry('profiles') - self.policy_registry = registry.Registry('policies') - self.driver_registry = registry.Registry('drivers') - self.endpoint_registry = registry.Registry('endpoints') - else: - self.profile_registry = registry.Registry( - 'profiles', global_env().profile_registry) - self.policy_registry = registry.Registry( - 'policies', global_env().policy_registry) - self.driver_registry = registry.Registry( - 'drivers', global_env().driver_registry) - self.endpoint_registry = registry.Registry( - 'endpoints', global_env().endpoint_registry) - - if env is not None: - # Merge user specified keys with current environment - self.params = env.get(self.PARAMETERS, {}) - custom_profiles = env.get(self.CUSTOM_PROFILES, {}) - custom_policies = env.get(self.CUSTOM_POLICIES, {}) - self.profile_registry.load(custom_profiles) - self.policy_registry.load(custom_policies) - - def parse(self, env_str): - """Parse a string format environment file into a dictionary.""" - - if env_str is None: - return {} - - env = parser.simple_parse(env_str) - - # Check unknown sections - for sect in env: - if sect not in self.SECTIONS: - msg = _('environment has unknown section "%s"') % sect - raise ValueError(msg) - - # Fill in default values for missing sections - for sect in self.SECTIONS: - if sect not in env: - env[sect] = {} - - return env - - def load(self, env_dict): - """Load environment from the given dictionary.""" - - self.params.update(env_dict.get(self.PARAMETERS, {})) - self.profile_registry.load(env_dict.get(self.CUSTOM_PROFILES, {})) - self.policy_registry.load(env_dict.get(self.CUSTOM_POLICIES, {})) - - def _check_plugin_name(self, plugin_type, name): - if name is None or name == "": - msg = _('%s type name not specified') % plugin_type - raise exception.InvalidPlugin(message=msg) - elif not isinstance(name, str): - msg = _('%s type name is not a string') % plugin_type - raise exception.InvalidPlugin(message=msg) - - def register_profile(self, name, plugin): - self._check_plugin_name('Profile', name) - self.profile_registry.register_plugin(name, plugin) - - def get_profile(self, name): - self._check_plugin_name('Profile', name) - plugin = self.profile_registry.get_plugin(name) - if plugin is None: - raise exception.ResourceNotFound(type='profile_type', id=name) - return plugin - - def get_profile_types(self): - return self.profile_registry.get_types() - - def register_policy(self, name, plugin): - self._check_plugin_name('Policy', name) - self.policy_registry.register_plugin(name, plugin) - - def get_policy(self, name): - self._check_plugin_name('Policy', name) - plugin = self.policy_registry.get_plugin(name) - if plugin is None: - raise exception.ResourceNotFound(type='policy_type', id=name) - return plugin - - def get_policy_types(self): - return self.policy_registry.get_types() - - def register_driver(self, name, plugin): - self._check_plugin_name('Driver', name) - self.driver_registry.register_plugin(name, plugin) - - def get_driver(self, name): - self._check_plugin_name('Driver', name) - plugin = self.driver_registry.get_plugin(name) - if plugin is None: - msg = _('Driver plugin %(name)s is not found.') % {'name': name} - raise exception.InvalidPlugin(message=msg) - return plugin - - def get_driver_types(self): - return self.driver_registry.get_types() - - def register_endpoint(self, name, plugin): - self._check_plugin_name('Endpoint', name) - plugin = self.endpoint_registry.register_plugin(name, plugin) - - def 
get_endpoint(self, name): - self._check_plugin_name('Endpoint', name) - plugin = self.endpoint_registry.get_plugin(name) - if plugin is None: - msg = _('Endpoint plugin %(name)s is not found.') % {'name': name} - raise exception.InvalidPlugin(message=msg) - return plugin - - def read_global_environment(self): - """Read and parse global environment files.""" - - cfg.CONF.import_opt('environment_dir', 'senlin.conf') - env_dir = cfg.CONF.environment_dir - - try: - files = glob.glob(os.path.join(env_dir, '*')) - except OSError as ex: - LOG.error('Failed to read %s', env_dir) - LOG.exception(ex) - return - - for fname in files: - try: - with open(fname) as f: - LOG.info('Loading environment from %s', fname) - self.load(self.parse(f.read())) - except ValueError as vex: - LOG.error('Failed to parse %s', fname) - LOG.exception(str(vex)) - except IOError as ioex: - LOG.error('Failed to read %s', fname) - LOG.exception(str(ioex)) - - -def _get_mapping(namespace): - mgr = extension.ExtensionManager( - namespace=namespace, - invoke_on_load=False) - return [[name, mgr[name].plugin] for name in mgr.names()] - - -def initialize(): - - global _environment - - if _environment is not None: - return - - env = Environment(is_global=True) - - # Register global plugins when initialized - entries = _get_mapping('senlin.profiles') - for name, plugin in entries: - env.register_profile(name, plugin) - - entries = _get_mapping('senlin.policies') - for name, plugin in entries: - env.register_policy(name, plugin) - - entries = _get_mapping('senlin.drivers') - for name, plugin in entries: - env.register_driver(name, plugin) - - entries = _get_mapping('senlin.endpoints') - for name, plugin in entries: - env.register_endpoint(name, plugin) - - env.read_global_environment() - _environment = env diff --git a/senlin/engine/event.py b/senlin/engine/event.py deleted file mode 100644 index a2bf93a3d..000000000 --- a/senlin/engine/event.py +++ /dev/null @@ -1,103 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
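The Environment above is the registry through which every profile, policy, driver and endpoint plugin is resolved; production deployments rely on the stevedore entry points loaded by initialize(). A short sketch of manual registration and lookup (MyProfile is a hypothetical plugin class):

    from senlin.engine import environment

    class MyProfile(object):
        pass

    env = environment.global_env()
    env.register_profile('my.custom.profile-1.0', MyProfile)
    assert env.get_profile('my.custom.profile-1.0') is MyProfile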
- -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from stevedore import named - -from senlin.common import consts - -LOG = logging.getLogger(__name__) -FMT = '%(name)s[%(obj_id)s] %(action)s[%(id)s] %(phase)s: %(reason)s' -dispatchers = None - - -def load_dispatcher(): - """Load dispatchers.""" - global dispatchers - - LOG.debug("Loading dispatchers") - dispatchers = named.NamedExtensionManager( - namespace="senlin.dispatchers", - names=cfg.CONF.event_dispatchers, - invoke_on_load=True, - propagate_map_exceptions=True) - if not list(dispatchers): - LOG.warning("No dispatchers configured for 'senlin.dispatchers'") - else: - LOG.info("Loaded dispatchers: %s", dispatchers.names()) - - -def _event_data(action, phase=None, reason=None): - action_name = action.action - if action_name in [consts.NODE_OPERATION, consts.CLUSTER_OPERATION]: - action_name = action.inputs.get('operation', action_name) - name = action.entity.name if action.entity else "Unknown" - obj_id = action.entity.id[:8] if action.entity else "Unknown" - - return dict(name=name, - obj_id=obj_id, - action=action_name, - id=action.id[:8], - phase=phase, - reason=reason) - - -def _dump(level, action, phase, reason, timestamp): - global dispatchers - - if timestamp is None: - timestamp = timeutils.utcnow(True) - - # We check the logging level threshold only when debug is False - if cfg.CONF.debug is False: - watermark = cfg.CONF.dispatchers.priority.upper() - bound = consts.EVENT_LEVELS.get(watermark, logging.INFO) - if level < bound: - return - - if cfg.CONF.dispatchers.exclude_derived_actions: - if action.cause == consts.CAUSE_DERIVED: - return - - try: - dispatchers.map_method("dump", level, action, - phase=phase, reason=reason, timestamp=timestamp) - except Exception as ex: - LOG.exception("Dispatcher failed to handle the event: %s", - str(ex)) - - -def critical(action, phase=None, reason=None, timestamp=None): - _dump(logging.CRITICAL, action, phase, reason, timestamp) - LOG.critical(FMT, _event_data(action, phase, reason)) - - -def error(action, phase=None, reason=None, timestamp=None): - _dump(logging.ERROR, action, phase, reason, timestamp) - LOG.error(FMT, _event_data(action, phase, reason)) - - -def warning(action, phase=None, reason=None, timestamp=None): - _dump(logging.WARNING, action, phase, reason, timestamp) - LOG.warning(FMT, _event_data(action, phase, reason)) - - -def info(action, phase=None, reason=None, timestamp=None): - _dump(logging.INFO, action, phase, reason, timestamp) - LOG.info(FMT, _event_data(action, phase, reason)) - - -def debug(action, phase=None, reason=None, timestamp=None): - _dump(logging.DEBUG, action, phase, reason, timestamp) - LOG.debug(FMT, _event_data(action, phase, reason)) diff --git a/senlin/engine/health_manager.py b/senlin/engine/health_manager.py deleted file mode 100644 index b5dd55248..000000000 --- a/senlin/engine/health_manager.py +++ /dev/null @@ -1,863 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Health Manager. 
- -Health Manager is responsible for monitoring the health of the clusters and -trigger corresponding actions to recover the clusters based on the pre-defined -health policies. -""" -from collections import defaultdict -from collections import namedtuple -import eventlet -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_utils import timeutils -import re -import tenacity - -from senlin.common import consts -from senlin.common import context -from senlin.common import messaging as rpc -from senlin.common import utils -from senlin.db.sqlalchemy import api as db_api -from senlin.engine import node as node_mod -from senlin.engine.notifications import heat_endpoint -from senlin.engine.notifications import nova_endpoint -from senlin import objects -from senlin.rpc import client as rpc_client - -LOG = logging.getLogger(__name__) - - -def chase_up(start_time, interval, name='Poller'): - """Utility function to check if there are missed intervals. - - :param start_time: A time object representing the starting time. - :param interval: An integer specifying the time interval in seconds. - :param name: Name of the caller for identification in logs. - :returns: Number of seconds to sleep before next round. - """ - end_time = timeutils.utcnow(True) - elapsed = timeutils.delta_seconds(start_time, end_time) - # check if we have missed any intervals? - missed = int((elapsed - 0.0000001) / interval) - if missed >= 1: - LOG.warning("%s missed %s intervals for checking", name, missed) - return (missed + 1) * interval - elapsed - - -def ListenerProc(exchange, project_id, cluster_id, recover_action): - """Thread procedure for running an event listener. - - :param exchange: The control exchange for a target service. - :param project_id: The ID of the project to filter. - :param cluster_id: The ID of the cluster to filter. - :param recover_action: The health policy action name. - """ - transport = messaging.get_notification_transport(cfg.CONF) - - if exchange == cfg.CONF.health_manager.nova_control_exchange: - endpoint = nova_endpoint.NovaNotificationEndpoint( - project_id, cluster_id, recover_action - ) - - else: - endpoint = heat_endpoint.HeatNotificationEndpoint( - project_id, cluster_id, recover_action - ) - - listener = messaging.get_notification_listener( - transport, [endpoint.target], [endpoint], executor='threading', - pool='senlin-listeners' - ) - - listener.start() - - -class HealthCheckType(object): - @staticmethod - def factory(detection_type, cid, interval, params): - node_update_timeout = params['node_update_timeout'] - detection_params = [ - p for p in params['detection_modes'] - if p['type'] == detection_type - ] - if len(detection_params) != 1: - raise Exception( - 'The same detection mode cannot be used more than once in the ' - 'same policy. 
Encountered {} instances of ' - 'type {}.'.format(len(detection_params), detection_type) - ) - - if detection_type == consts.NODE_STATUS_POLLING: - return NodePollStatusHealthCheck( - cid, interval, node_update_timeout, detection_params[0]) - elif detection_type == consts.NODE_STATUS_POLL_URL: - return NodePollUrlHealthCheck( - cid, interval, node_update_timeout, detection_params[0]) - elif detection_type == consts.HYPERVISOR_STATUS_POLLING: - return HypervisorPollStatusHealthCheck( - cid, interval, node_update_timeout, detection_params[0]) - else: - raise Exception( - 'Invalid detection type: {}'.format(detection_type)) - - def __init__(self, cluster_id, interval, node_update_timeout, params): - """Initialize HealthCheckType - - :param ctx: - :param cluster_id: The UUID of the cluster to be checked. - :param params: Parameters specific to poll url or recovery action. - """ - self.cluster_id = cluster_id - self.interval = interval - self.node_update_timeout = node_update_timeout - self.params = params - - def run_health_check(self, ctx, node): - """Run health check on node - - :returns: True if node is healthy. False otherwise. - """ - pass - - def _node_within_grace_period(self, node): - """Check if current time is within the node_update_timeout grace period - - :returns: True if current time is less than node_update_timeout since - last node update action. False otherwise. - """ - - node_last_updated = node.updated_at or node.init_at - if timeutils.is_older_than(node_last_updated, - self.node_update_timeout): - # node was last updated more than node_update_timeout seconds ago - # -> we are outside the grace period - LOG.info("%s was updated at %s which is more " - "than %d secs ago. Mark node as unhealthy.", - node.name, node_last_updated, - self.node_update_timeout) - return False - else: - # node was last updated less than node_update_timeout seconds ago - # -> we are inside the grace period - LOG.info("%s was updated at %s which is less " - "than %d secs ago. Mark node as healthy.", - node.name, node_last_updated, - self.node_update_timeout) - return True - - -class NodePollStatusHealthCheck(HealthCheckType): - def run_health_check(self, ctx, node): - """Routine to be executed for polling node status. - - :returns: True if node is healthy. False otherwise. - """ - try: - # create engine node from db node - entity = node_mod.Node._from_object(ctx, node) - - # If health check returns True, return True to mark node as - # healthy. Else return True to mark node as healthy if we are still - # within the node's grace period to allow the node to warm-up. - # Return False to mark the node as unhealthy if we are outside the - # grace period. - - return (entity.do_healthcheck(ctx, consts.NODE_STATUS_POLLING) or - self._node_within_grace_period(node)) - except Exception as ex: - LOG.warning( - 'Error when performing health check on node %s: %s', - node.id, ex - ) - - # treat node as healthy when an exception is encountered - return True - - -class HypervisorPollStatusHealthCheck(HealthCheckType): - def run_health_check(self, ctx, node): - """Routine to be executed for polling hypervisor status. - - :returns: True if node is healthy. False otherwise. - """ - try: - # create engine node from db node - entity = node_mod.Node._from_object(ctx, node) - - # If health check returns True, return True to mark node as - # healthy. Else return True to mark node as healthy if we are still - # within the node's grace period to allow the node to warm-up. 
-            # Return False to mark the node as unhealthy if we are outside the
-            # grace period.
-
-            return (entity.do_healthcheck(ctx,
-                                          consts.HYPERVISOR_STATUS_POLLING) or
-                    self._node_within_grace_period(node))
-        except Exception as ex:
-            LOG.warning(
-                'Error when performing health check on node %s: %s',
-                node.id, ex
-            )
-
-            # treat node as healthy when an exception is encountered
-            return True
-
-
-class NodePollUrlHealthCheck(HealthCheckType):
-    @staticmethod
-    def convert_detection_tuple(dictionary):
-        return namedtuple('DetectionMode', dictionary.keys())(**dictionary)
-
-    def _expand_url_template(self, url_template, node):
-        """Expand parameters in a URL template.
-
-        :param url_template:
-            A string containing parameters that will be expanded. Currently
-            only the {nodename} parameter is supported, which will be replaced
-            by the actual node name.
-        :param node: The DB object for the node to use for parameter expansion
-        :returns: A string containing the expanded URL
-        """
-
-        nodename_pattern = re.compile(r"(\{nodename\})")
-        url = nodename_pattern.sub(node.name, url_template)
-
-        return url
-
-    def _poll_url(self, url, node):
-        verify_ssl = self.params['poll_url_ssl_verify']
-        conn_error_as_unhealthy = self.params[
-            'poll_url_conn_error_as_unhealthy']
-        expected_resp_str = self.params['poll_url_healthy_response']
-        retry_interval = self.params['poll_url_retry_interval']
-
-        timeout = max(retry_interval * 0.1, 1)
-
-        try:
-            result = utils.url_fetch(url, timeout=timeout,
-                                     verify=verify_ssl)
-        except Exception as ex:
-            if conn_error_as_unhealthy:
-                LOG.info("%s for %s: connection error when polling URL (%s)",
-                         consts.POLL_URL_FAIL, node.name, ex)
-                return False
-            else:
-                LOG.info("%s for %s: ignoring connection error when polling "
-                         "URL (%s)",
-                         consts.POLL_URL_PASS, node.name, ex)
-                return True
-
-        if not re.search(expected_resp_str, result):
-            LOG.info("%s for %s: did not find expected response string %s in "
-                     "URL result (%s)",
-                     consts.POLL_URL_FAIL, node.name, expected_resp_str,
-                     result)
-            return False
-
-        LOG.info("%s for %s: matched expected response string.",
-                 consts.POLL_URL_PASS, node.name)
-        return True
-
-    def run_health_check(self, ctx, node):
-        """Routine to check a node's status from a URL and recover it if needed.
-
-        :param node: The node to be checked.
-        :returns: True if node is healthy. False otherwise.
-        """
-
-        max_unhealthy_retry = self.params['poll_url_retry_limit']
-        retry_interval = self.params['poll_url_retry_interval']
-
-        def _return_last_value(retry_state):
-            return retry_state.outcome.result()
-
-        @tenacity.retry(
-            retry=tenacity.retry_if_result(lambda x: x is False),
-            wait=tenacity.wait_fixed(retry_interval),
-            retry_error_callback=_return_last_value,
-            stop=tenacity.stop_after_attempt(max_unhealthy_retry)
-        )
-        def _poll_url_with_retry(url):
-            return self._poll_url(url, node)
-
-        try:
-            if node.status != consts.NS_ACTIVE:
-                LOG.info("%s for %s: node is not in ACTIVE state, so skip "
-                         "poll url",
-                         consts.POLL_URL_PASS, node.name)
-                return True
-
-            url_template = self.params['poll_url']
-            url = self._expand_url_template(url_template, node)
-
-            # If health check returns True, return True to mark node as
-            # healthy. Else return True to mark node as healthy if we are still
-            # within the node's grace period to allow the node to warm-up.
-            # Return False to mark the node as unhealthy if we are outside the
-            # grace period.
- - return (_poll_url_with_retry(url) or - self._node_within_grace_period(node)) - except Exception as ex: - LOG.warning( - "%s for %s: Ignoring error on poll URL: %s", - consts.POLL_URL_PASS, node.name, ex - ) - - # treat node as healthy when an exception is encountered - return True - - -class HealthCheck(object): - - def __init__(self, ctx, engine_id, cluster_id, check_type, interval, - node_update_timeout, params, enabled): - self.rpc_client = rpc_client.get_engine_client() - self.ctx = ctx - self.engine_id = engine_id - - self.cluster_id = cluster_id - self.check_type = check_type - self.interval = interval - self.node_update_timeout = node_update_timeout - self.params = params - self.enabled = enabled - self.timer = None - self.listener = None - - self.health_check_types = [] - self.recover_action = {} - self.type = None - self.get_health_check_types() - self.get_recover_actions() - - def get_health_check_types(self): - polling_types = [consts.NODE_STATUS_POLLING, - consts.NODE_STATUS_POLL_URL, - consts.HYPERVISOR_STATUS_POLLING] - - detection_types = self.check_type.split(',') - if all(check in polling_types for check in detection_types): - interval = min(self.interval, cfg.CONF.check_interval_max) - for check in detection_types: - self.health_check_types.append( - HealthCheckType.factory( - check, self.cluster_id, interval, self.params - ) - ) - self.type = consts.POLLING - elif (len(detection_types) == 1 and - detection_types[0] == consts.LIFECYCLE_EVENTS): - self.type = consts.EVENTS - - def get_recover_actions(self): - if 'node_delete_timeout' in self.params: - self.recover_action['delete_timeout'] = self.params[ - 'node_delete_timeout'] - if 'node_force_recreate' in self.params: - self.recover_action['force_recreate'] = self.params[ - 'node_force_recreate'] - if 'recover_action' in self.params: - rac = self.params['recover_action'] - for operation in rac: - self.recover_action['operation'] = operation.get('name') - - def execute_health_check(self): - start_time = timeutils.utcnow(True) - - try: - if not self.health_check_types: - LOG.error("No health check types found for cluster: %s", - self.cluster_id) - return chase_up(start_time, self.interval) - - cluster = objects.Cluster.get(self.ctx, self.cluster_id, - project_safe=False) - if not cluster: - LOG.warning("Cluster (%s) is not found.", self.cluster_id) - return chase_up(start_time, self.interval) - - ctx = context.get_service_context(user_id=cluster.user, - project_id=cluster.project) - - actions = [] - - # loop through nodes and run all health checks on each node - nodes = objects.Node.get_all_by_cluster(ctx, self.cluster_id) - - for node in nodes: - action = self._check_node_health(ctx, node, cluster) - if action: - actions.append(action) - - for a in actions: - # wait for action to complete - res, reason = self._wait_for_action( - ctx, a['action'], self.node_update_timeout) - if not res: - LOG.warning("Node recovery action %s did not complete " - "within specified timeout: %s", a['action'], - reason) - - if len(actions) == 0: - LOG.info("Health check passed for all nodes in cluster %s.", - self.cluster_id) - except Exception as ex: - LOG.warning("Error while performing health check: %s", ex) - - finally: - return chase_up(start_time, self.interval) - - def _check_node_health(self, ctx, node, cluster): - node_is_healthy = True - - if self.params['recovery_conditional'] == consts.ANY_FAILED: - # recovery happens if any detection mode fails - # i.e. 
the inverse logic is that node is considered healthy - # if all detection modes pass - node_is_healthy = all( - hc.run_health_check(ctx, node) - for hc in self.health_check_types) - elif self.params['recovery_conditional'] == consts.ALL_FAILED: - # recovery happens if all detection modes fail - # i.e. the inverse logic is that node is considered healthy - # if any detection mode passes - node_is_healthy = any( - hc.run_health_check(ctx, node) - for hc in self.health_check_types) - else: - raise Exception("%s is an invalid recovery conditional" % - self.params['recovery_conditional']) - - if not node_is_healthy: - LOG.info("Health check failed for %s in %s and " - "recovery has started.", - node.name, cluster.name) - return self._recover_node(ctx, node.id) - - def _wait_for_action(self, ctx, action_id, timeout): - req = objects.ActionGetRequest(identity=action_id) - action = {} - with timeutils.StopWatch(timeout) as timeout_watch: - while not timeout_watch.expired(): - action = self.rpc_client.call(ctx, 'action_get', req) - if action['status'] in [consts.ACTION_SUCCEEDED, - consts.ACTION_FAILED, - consts.ACTION_CANCELLED]: - break - eventlet.sleep(2) - - if not action: - return False, "Failed to retrieve action." - - elif action['status'] == consts.ACTION_SUCCEEDED: - return True, "" - - elif (action['status'] == consts.ACTION_FAILED or - action['status'] == consts.ACTION_CANCELLED): - return False, "Cluster check action failed or cancelled" - - return False, ("Timeout while waiting for node recovery action to " - "finish") - - def _recover_node(self, ctx, node_id): - """Recover node - - :returns: Recover action - """ - try: - req = objects.NodeRecoverRequest(identity=node_id, - params=self.recover_action) - - return self.rpc_client.call(ctx, 'node_recover', req) - except Exception as ex: - LOG.error("Error when performing node recovery for %s: %s", - node_id, ex) - return None - - def db_create(self): - try: - objects.HealthRegistry.create( - self.ctx, self.cluster_id, self.check_type, self.interval, - self.params, self.engine_id, self.enabled) - return True - except Exception as ex: - LOG.error("Error while adding health entry for cluster %s to " - "database: %s", self.cluster_id, ex) - return False - - def db_delete(self): - try: - objects.HealthRegistry.delete(self.ctx, self.cluster_id) - return True - except Exception as ex: - LOG.error("Error while removing health entry for cluster %s from " - "database: %s", self.cluster_id, ex) - return False - - def enable(self): - try: - objects.HealthRegistry.update(self.ctx, self.cluster_id, - {'enabled': True}) - self.enabled = True - return True - except Exception as ex: - LOG.error("Error while enabling health entry for cluster %s: %s", - self.cluster_id, ex) - return False - - def disable(self): - try: - objects.HealthRegistry.update(self.ctx, self.cluster_id, - {'enabled': False}) - self.enabled = False - return True - except Exception as ex: - LOG.error("Error while disabling health entry for cluster %s: %s", - self.cluster_id, ex) - return False - - -class RuntimeHealthRegistry(object): - def __init__(self, ctx, engine_id, thread_group): - self.ctx = ctx - self.engine_id = engine_id - self.registries = {} - self.tg = thread_group - self.health_check_types = defaultdict(lambda: []) - - def register_cluster(self, cluster_id, interval=None, - node_update_timeout=None, params=None, - enabled=True): - """Register cluster to health registry. - - :param cluster_id: The ID of the cluster to be registered. 
- :param interval: An optional integer indicating the length of checking - periods in seconds. - :param node_update_timeout: Timeout to wait for node action to - complete. - :param dict params: Other parameters for the health check. - :param enabled: Boolean indicating if the health check is enabled. - :return: RuntimeHealthRegistry object for cluster - """ - params = params or {} - - # extract check_type from params - check_type = "" - if 'detection_modes' in params: - check_type = ','.join([ - NodePollUrlHealthCheck.convert_detection_tuple(d).type - for d in params['detection_modes'] - ]) - - # add node_update_timeout to params - params['node_update_timeout'] = node_update_timeout - entry = None - try: - entry = HealthCheck( - ctx=self.ctx, - engine_id=self.engine_id, - cluster_id=cluster_id, - check_type=check_type, - interval=interval, - node_update_timeout=node_update_timeout, - params=params, - enabled=enabled - ) - if entry.db_create(): - self.registries[cluster_id] = entry - self.add_health_check(self.registries[cluster_id]) - except Exception as ex: - LOG.error("Error while trying to register cluster for health " - "check %s: %s", cluster_id, ex) - if entry: - entry.db_delete() - - def unregister_cluster(self, cluster_id): - """Unregister a cluster from health registry. - - :param cluster_id: The ID of the cluster to be unregistered. - :return: RuntimeHealthRegistry object for the cluster being - unregistered. - """ - entry = None - try: - if cluster_id in self.registries: - entry = self.registries.pop(cluster_id) - entry.db_delete() - except Exception as ex: - LOG.error("Error while trying to unregister cluster from health " - "check %s: %s", cluster_id, ex) - finally: - if entry: - self.remove_health_check(entry) - - def enable_cluster(self, cluster_id): - """Update the status of a cluster to enabled in the health registry. - - :param cluster_id: The ID of the cluster to be enabled. - """ - LOG.info("Enabling health check for cluster %s.", cluster_id) - try: - if cluster_id in self.registries: - if self.registries[cluster_id].enable(): - self.add_health_check(self.registries[cluster_id]) - else: - LOG.error("Unable to enable cluster for health checking: %s", - cluster_id) - except Exception as ex: - LOG.error("Error while enabling health check for cluster %s: %s", - cluster_id, ex) - if cluster_id in self.registries: - self.remove_health_check(self.registries[cluster_id]) - - def disable_cluster(self, cluster_id): - """Update the status of a cluster to disabled in the health registry. - - :param cluster_id: The ID of the cluster to be disabled. - :return: None. 
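Taken together, the methods above give the registry a simple lifecycle. An illustrative sequence (IDs are hypothetical; ctx, engine_id and tg would come from the running engine, and params would normally carry the detection_modes from a health policy):

    registry = RuntimeHealthRegistry(ctx, engine_id, tg)
    registry.register_cluster('fake-cluster-uuid', interval=60,
                              node_update_timeout=300, params={})
    registry.disable_cluster('fake-cluster-uuid')    # keep entry, pause checks
    registry.unregister_cluster('fake-cluster-uuid')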
- """ - LOG.info("Disabling health check for cluster %s.", cluster_id) - try: - if cluster_id in self.registries: - self.registries[cluster_id].disable() - else: - LOG.error("Unable to disable cluster for health checking: %s", - cluster_id) - except Exception as ex: - LOG.error("Error while disabling health check for cluster %s: %s", - cluster_id, ex) - finally: - if cluster_id in self.registries: - self.remove_health_check(self.registries[cluster_id]) - - def _add_timer(self, cluster_id): - entry = self.registries[cluster_id] - if entry.timer: - LOG.error("Health check for cluster %s already exists", cluster_id) - return None - timer = self.tg.add_dynamic_timer(entry.execute_health_check, None, - None) - if timer: - entry.timer = timer - else: - LOG.error("Error creating timer for cluster: %s", cluster_id) - - def _add_listener(self, cluster_id): - entry = self.registries[cluster_id] - if entry.listener: - LOG.error("Listener for cluster %s already exists", cluster_id) - return - - cluster = objects.Cluster.get(self.ctx, cluster_id, project_safe=False) - if not cluster: - LOG.warning("Cluster (%s) is not found.", cluster_id) - return - profile = objects.Profile.get(self.ctx, cluster.profile_id, - project_safe=False) - profile_type = profile.type.split('-')[0] - if profile_type == 'os.nova.server': - exchange = cfg.CONF.health_manager.nova_control_exchange - elif profile_type == 'os.heat.stack': - exchange = cfg.CONF.health_manager.heat_control_exchange - else: - return - - project = cluster.project - listener = self.tg.add_thread(ListenerProc, exchange, project, - cluster_id, entry.recover_action) - if listener: - entry.listener = listener - else: - LOG.error("Error creating listener for cluster: %s", cluster_id) - - def add_health_check(self, entry): - """Add a health check to the RuntimeHealthRegistry. - - This method creates a timer/thread based on the type of health check - being added. - - :param entry: Entry to add to the registry. - :return: None - """ - if entry.cluster_id in self.registries: - if not entry.enabled: - return - elif entry.timer: - LOG.error("Health check for cluster %s already exists", - entry.cluster_id) - return - else: - LOG.error("Unable to add health check for cluster: %s", - entry.cluster_id) - return - - if entry.type == consts.POLLING: - self._add_timer(entry.cluster_id) - elif entry.type == consts.EVENTS: - LOG.info("Start listening events for cluster (%s).", - entry.cluster_id) - self._add_listener(entry.cluster_id) - else: - LOG.error("Cluster %(id)s type %(type)s is invalid.", - {'id': entry.cluster_id, 'type': entry.type}) - - def remove_health_check(self, entry): - """Remove a health check for the RuntimeHealthRegistry. - - This method stops and removes the timer/thread based to the type of - health check being removed. 
-
-        :param entry: Entry to remove from the registry.
-        :return: None
-        """
-        if entry.timer:
-            # stop timer
-            entry.timer.stop()
-
-            try:
-                # tell threadgroup to remove timer
-                self.tg.timer_done(entry.timer)
-            except ValueError:
-                pass
-            finally:
-                entry.timer = None
-
-        if entry.listener:
-            try:
-                self.tg.thread_done(entry.listener)
-                entry.listener.stop()
-            except ValueError:
-                pass
-            finally:
-                entry.listener = None
-
-    def load_runtime_registry(self):
-        """Load the initial runtime registry with a DB scan."""
-        db_registries = objects.HealthRegistry.claim(self.ctx, self.engine_id)
-
-        for registry in db_registries:
-            if registry.cluster_id in self.registries:
-                LOG.warning("Skipping duplicate health check for cluster: %s",
-                            registry.cluster_id)
-            # Claiming indicates we claim a health registry whose engine was
-            # dead, and we will update the health registry's engine_id with
-            # the current engine id. But we may not always start the check.
-            entry = HealthCheck(
-                ctx=self.ctx,
-                engine_id=self.engine_id,
-                cluster_id=registry.cluster_id,
-                check_type=registry.check_type,
-                interval=registry.interval,
-                node_update_timeout=registry.params['node_update_timeout'],
-                params=registry.params,
-                enabled=registry.enabled
-            )
-
-            LOG.info("Loading cluster %(c)s enabled=%(e)s for "
-                     "health monitoring",
-                     {'c': registry.cluster_id, 'e': registry.enabled})
-            self.registries[registry.cluster_id] = entry
-            if registry.enabled:
-                self.add_health_check(self.registries[registry.cluster_id])
-
-    def cleanup_orphaned_healthchecks(self):
-        """Clean up orphaned health checks."""
-        db_registries = db_api.registry_list_ids_by_service(
-            self.ctx, self.engine_id
-        )
-        for registry_id in self.registries:
-            if registry_id in db_registries:
-                continue
-            entity = self.registries[registry_id]
-            if not entity:
-                continue
-            LOG.info('Removing orphaned health check: %s from %s',
-                     registry_id, self.engine_id)
-            self.remove_health_check(self.registries[registry_id])
-
-
-def notify(engine_id, method, **kwargs):
-    """Send notification to health manager service.
-
-    Note that the health manager only handles JSON-serializable parameters.
-
-
-def notify(engine_id, method, **kwargs):
-    """Send a notification to the health manager service.
-
-    Note that the health manager only handles JSON-serializable parameters.
-
-    :param engine_id: dispatcher to notify; broadcast if value is None
-    :param method: remote method to call
-    """
-    timeout = cfg.CONF.engine_life_check_timeout
-    client = rpc.get_rpc_client(consts.HEALTH_MANAGER_TOPIC, None)
-
-    if engine_id:
-        # Notify specific dispatcher identified by engine_id
-        call_context = client.prepare(timeout=timeout, server=engine_id)
-    else:
-        # Broadcast to all dispatchers
-        call_context = client.prepare(timeout=timeout)
-
-    ctx = context.get_admin_context()
-
-    try:
-        call_context.call(ctx, method, **kwargs)
-        return True
-    except messaging.MessagingTimeout:
-        return False
-
-
-def register(cluster_id, engine_id=None, **kwargs):
-    params = kwargs.pop('params', {})
-    interval = kwargs.pop('interval', cfg.CONF.periodic_interval)
-    node_update_timeout = kwargs.pop('node_update_timeout', 300)
-    enabled = kwargs.pop('enabled', True)
-    return notify(engine_id, 'register_cluster',
-                  cluster_id=cluster_id,
-                  interval=interval,
-                  node_update_timeout=node_update_timeout,
-                  params=params,
-                  enabled=enabled)
-
-
-def unregister(cluster_id):
-    engine_id = get_manager_engine(cluster_id)
-    if engine_id:
-        return notify(engine_id, 'unregister_cluster', cluster_id=cluster_id)
-    return True
-
-
-def enable(cluster_id, **kwargs):
-    engine_id = get_manager_engine(cluster_id)
-    if engine_id:
-        return notify(engine_id, 'enable_cluster', cluster_id=cluster_id,
-                      params=kwargs)
-    return False
-
-
-def disable(cluster_id, **kwargs):
-    engine_id = get_manager_engine(cluster_id)
-    if engine_id:
-        return notify(engine_id, 'disable_cluster', cluster_id=cluster_id,
-                      params=kwargs)
-    return False
-
-
-def get_manager_engine(cluster_id):
-    ctx = context.get_admin_context()
-
-    registry = objects.HealthRegistry.get(ctx, cluster_id)
-    if not registry:
-        return None
-
-    return registry.engine_id
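The notify() helper above follows the standard oslo.messaging RPC pattern: prepare() pins the call to one server when the owning engine is known, and falls back to a topic-wide call otherwise. A hedged sketch of building such a client follows; the topic, version, and configuration are illustrative placeholders, not Senlin's actual settings.

    from oslo_config import cfg
    import oslo_messaging as messaging

    transport = messaging.get_rpc_transport(cfg.CONF)
    target = messaging.Target(topic='senlin-health-manager', version='1.0')
    client = messaging.get_rpc_client(transport, target)

    # A pinned call, as notify() does when engine_id is given:
    #   client.prepare(timeout=2, server=engine_id).call(
    #       ctx, 'enable_cluster', cluster_id='c1')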
- """ - - def __init__(self, name, profile_id, cluster_id=None, context=None, - **kwargs): - self.id = kwargs.get('id', None) - if name: - self.name = name - else: - self.name = 'node-' + utils.random_name(8) - - # This is a safe guard to ensure that we have orphan node's cluster - # correctly set to an empty string - if cluster_id is None: - cluster_id = '' - - self.physical_id = kwargs.get('physical_id', None) - self.profile_id = profile_id - self.user = kwargs.get('user', '') - self.project = kwargs.get('project', '') - self.domain = kwargs.get('domain', '') - self.cluster_id = cluster_id - self.index = kwargs.get('index', -1) - self.role = kwargs.get('role', '') - - self.init_at = kwargs.get('init_at', None) - self.created_at = kwargs.get('created_at', None) - self.updated_at = kwargs.get('updated_at', None) - - self.status = kwargs.get('status', consts.NS_INIT) - self.status_reason = kwargs.get('status_reason', 'Initializing') - self.data = kwargs.get('data', {}) - self.metadata = kwargs.get('metadata', {}) - self.dependents = kwargs.get('dependents', {}) - self.tainted = False - self.rt = {} - - if context is not None: - if self.user == '': - self.user = context.user_id - if self.project == '': - self.project = context.project_id - if self.domain == '': - self.domain = context.domain_id - self._load_runtime_data(context) - - def _load_runtime_data(self, context): - profile = None - try: - profile = pb.Profile.load(context, profile_id=self.profile_id, - project_safe=False) - except exc.ResourceNotFound: - LOG.debug('Profile not found: %s', self.profile_id) - - self.rt = {'profile': profile} - - def store(self, context): - """Store the node into database table. - - The invocation of object API could be a node_create or a node_update, - depending on whether node has an ID assigned. - - @param context: Request context for node creation. - @return: UUID of node created. - """ - values = { - 'name': self.name, - 'physical_id': self.physical_id, - 'cluster_id': self.cluster_id, - 'profile_id': self.profile_id, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'index': self.index, - 'role': self.role, - 'init_at': self.init_at, - 'created_at': self.created_at, - 'updated_at': self.updated_at, - 'status': self.status, - 'status_reason': self.status_reason, - 'meta_data': self.metadata, - 'data': self.data, - 'dependents': self.dependents, - 'tainted': self.tainted, - } - - if self.id: - no.Node.update(context, self.id, values) - else: - init_at = timeutils.utcnow(True) - self.init_at = init_at - values['init_at'] = init_at - node = no.Node.create(context, values) - self.id = node.id - - self._load_runtime_data(context) - return self.id - - @classmethod - def _from_object(cls, context, obj): - """Construct a node from node object. 
-
-    @classmethod
-    def _from_object(cls, context, obj):
-        """Construct a node from a node object.
-
-        @param context: the context used for DB operations;
-        @param obj: a node object that contains all fields.
-        """
-        kwargs = {
-            'id': obj.id,
-            'physical_id': obj.physical_id,
-            'user': obj.user,
-            'project': obj.project,
-            'domain': obj.domain,
-            'index': obj.index,
-            'role': obj.role,
-            'init_at': obj.init_at,
-            'created_at': obj.created_at,
-            'updated_at': obj.updated_at,
-            'status': obj.status,
-            'status_reason': obj.status_reason,
-            'data': obj.data,
-            'metadata': obj.metadata,
-            'dependents': obj.dependents,
-            'tainted': obj.tainted,
-        }
-
-        return cls(obj.name, obj.profile_id, obj.cluster_id,
-                   context=context, **kwargs)
-
-    @classmethod
-    def load(cls, context, node_id=None, db_node=None, project_safe=True):
-        """Retrieve a node from the database."""
-        if db_node is None:
-            db_node = no.Node.get(context, node_id, project_safe=project_safe)
-            if db_node is None:
-                raise exc.ResourceNotFound(type='node', id=node_id)
-
-        return cls._from_object(context, db_node)
-
-    @classmethod
-    def load_all(cls, context, cluster_id=None, limit=None, marker=None,
-                 sort=None, filters=None, project_safe=True):
-        """Retrieve all nodes from the database."""
-        objs = no.Node.get_all(context, cluster_id=cluster_id,
-                               filters=filters, sort=sort,
-                               limit=limit, marker=marker,
-                               project_safe=project_safe)
-
-        for obj in objs:
-            node = cls._from_object(context, obj)
-            yield node
-
-    def set_status(self, context, status, reason=None, **params):
-        """Set the status of the node.
-
-        :param context: The request context.
-        :param status: New status for the node.
-        :param reason: The reason that leads the node to its current status.
-        :param params: Other properties that need an update.
-        :returns: ``None``.
-        """
-        values = {}
-        now = timeutils.utcnow(True)
-        if status == consts.NS_ACTIVE and self.status == consts.NS_CREATING:
-            self.created_at = values['created_at'] = now
-        if status not in [consts.NS_CREATING, consts.NS_UPDATING,
-                          consts.NS_RECOVERING, consts.NS_OPERATING]:
-            self.updated_at = values['updated_at'] = now
-
-        self.status = status
-        values['status'] = status
-        if reason:
-            self.status_reason = reason
-            values['status_reason'] = reason
-        for p, v in params.items():
-            setattr(self, p, v)
-            values[p] = v
-        no.Node.update(context, self.id, values)
-
-    def get_details(self, context):
-        if not self.physical_id:
-            return {}
-        return pb.Profile.get_details(context, self)
-
-    def do_create(self, context):
-        if self.status != consts.NS_INIT:
-            LOG.error('Node is in status "%s"', self.status)
-            self.set_status(context, consts.NS_ERROR,
-                            'Node must be in INIT status')
-            return False, 'Node must be in INIT status'
-
-        self.set_status(context, consts.NS_CREATING, 'Creation in progress')
-        try:
-            physical_id = pb.Profile.create_object(context, self)
-        except exc.EResourceCreation as ex:
-            physical_id = ex.resource_id
-            self.set_status(context, consts.NS_ERROR, str(ex),
-                            physical_id=physical_id)
-            return False, str(ex)
-
-        self.set_status(context, consts.NS_ACTIVE, 'Creation succeeded',
-                        physical_id=physical_id)
-        return True, None
-
-    def do_delete(self, context):
-        self.set_status(context, consts.NS_DELETING, 'Deletion in progress')
-        try:
-            # The following operation always returns True unless an
-            # exception is thrown
-            pb.Profile.delete_object(context, self)
-        except exc.EResourceDeletion as ex:
-            self.set_status(context, consts.NS_ERROR, str(ex))
-            return False
-
-        no.Node.delete(context, self.id)
-        return True
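The timestamp bookkeeping in set_status() above can be summarized as: created_at is written only on the CREATING -> ACTIVE transition, and updated_at is skipped while the node is in a transient state. A self-contained sketch of that rule (status names mirror the constants above; the helper itself is illustrative only):

    from oslo_utils import timeutils

    TRANSIENT = {'CREATING', 'UPDATING', 'RECOVERING', 'OPERATING'}

    def status_values(current, new):
        values = {'status': new}
        now = timeutils.utcnow(True)
        if new == 'ACTIVE' and current == 'CREATING':
            values['created_at'] = now
        if new not in TRANSIENT:
            values['updated_at'] = now
        return values

    assert 'created_at' in status_values('CREATING', 'ACTIVE')
    assert 'updated_at' not in status_values('ACTIVE', 'UPDATING')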
-
-    def do_update(self, context, params):
-        """Update a node's property.
-
-        This function is supposed to be invoked from a NODE_UPDATE action.
-
-        :param dict params: parameters in a dictionary that may contain keys
-                            like 'new_profile_id', 'name', 'role', 'metadata'.
-        """
-        if not self.physical_id:
-            return False
-
-        self.set_status(context, consts.NS_UPDATING, 'Update in progress')
-
-        new_profile_id = params.pop('new_profile_id', None)
-        res = True
-        if new_profile_id:
-            try:
-                res = pb.Profile.update_object(context, self, new_profile_id,
-                                               **params)
-            except exc.EResourceUpdate as ex:
-                self.set_status(context, consts.NS_ERROR, str(ex))
-                return False
-
-        # update was not successful
-        if not res:
-            return False
-
-        props = dict([(k, v) for k, v in params.items()
-                      if k in ('name', 'role', 'metadata', 'tainted')])
-        if new_profile_id:
-            props['profile_id'] = new_profile_id
-            self.rt['profile'] = pb.Profile.load(context,
-                                                 profile_id=new_profile_id)
-
-        self.set_status(context, consts.NS_ACTIVE, 'Update succeeded', **props)
-
-        return True
-
-    def do_join(self, context, cluster_id):
-        if self.cluster_id == cluster_id:
-            return True
-
-        try:
-            res = pb.Profile.join_cluster(context, self, cluster_id)
-        except exc.EResourceUpdate as ex:
-            LOG.error('Node join cluster failed: %s.', ex)
-            return False
-
-        if not res:
-            return False
-        timestamp = timeutils.utcnow(True)
-        db_node = no.Node.migrate(context, self.id, cluster_id, timestamp)
-        self.cluster_id = cluster_id
-        self.updated_at = timestamp
-        self.index = db_node.index
-        return True
-
-    def do_leave(self, context):
-        if self.cluster_id == '':
-            return True
-
-        try:
-            res = pb.Profile.leave_cluster(context, self)
-        except exc.EResourceDeletion as ex:
-            LOG.error('Node leave cluster failed: %s.', ex)
-            return False
-
-        if not res:
-            return False
-        timestamp = timeutils.utcnow(True)
-        no.Node.migrate(context, self.id, None, timestamp)
-        self.cluster_id = ''
-        self.updated_at = timestamp
-        self.index = -1
-        return True
-
-    def do_check(self, context):
-        if not self.physical_id:
-            return False
-
-        try:
-            res = pb.Profile.check_object(context, self)
-        except exc.EServerNotFound as ex:
-            self.set_status(context, consts.NS_ERROR, str(ex),
-                            physical_id=None)
-            return True
-        except exc.EResourceOperation as ex:
-            self.set_status(context, consts.NS_ERROR, str(ex))
-            return False
-
-        # Physical object is ACTIVE but for some reason the node status in
-        # senlin was WARNING. We only update the status_reason.
-        if res:
-            if self.status == consts.NS_WARNING:
-                msg = ("Check: Physical object is ACTIVE but the node status "
                       "was WARNING. %s") % self.status_reason
-                self.set_status(context, consts.NS_WARNING, msg)
-                return True
-
-            self.set_status(context, consts.NS_ACTIVE,
-                            "Check: Node is ACTIVE.")
-        else:
-            self.set_status(context, consts.NS_ERROR,
-                            "Check: Node is not ACTIVE.")
-
-        return True
-
-    def do_healthcheck(self, context, health_check_type):
-        """Run a health check on the node.
-
-        This function is supposed to be invoked from the health manager to
-        check the health of a given node.
-
-        :param context: The request context of the action.
-        :returns: True if the node is healthy. False otherwise.
-        """
-        return pb.Profile.healthcheck_object(context, self, health_check_type)
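Note the asymmetry in do_check() above: a passing physical check does not clear a WARNING status, it only refreshes the reason, while a failing check always drives the node to ERROR. A compact, self-contained sketch of that reconciliation rule (hypothetical helper, not Senlin code):

    def reconcile(node_status, physically_ok):
        if not physically_ok:
            return 'ERROR', 'Check: Node is not ACTIVE.'
        if node_status == 'WARNING':
            return 'WARNING', 'Physical object is ACTIVE but node was WARNING.'
        return 'ACTIVE', 'Check: Node is ACTIVE.'

    assert reconcile('WARNING', True)[0] == 'WARNING'
    assert reconcile('ACTIVE', False)[0] == 'ERROR'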
- """ - options = action.inputs - - operation = options.get('operation', None) - - if (not self.physical_id and operation and - (operation.upper() == consts.RECOVER_REBOOT or - operation.upper() == consts.RECOVER_REBUILD)): - # physical id is required for REBOOT or REBUILD operations - LOG.warning('Recovery failed because node has no physical id' - ' was provided for reboot or rebuild operation.') - return False - - if options.get('check', False): - res = False - try: - res = pb.Profile.check_object(context, self) - except exc.EResourceOperation: - pass - - if res: - self.set_status(context, consts.NS_ACTIVE, - reason="Recover: Node is ACTIVE.") - return True - - self.set_status(context, consts.NS_RECOVERING, - reason='Recovery in progress') - - try: - physical_id, status = pb.Profile.recover_object(context, - self, **options) - except exc.EResourceOperation as ex: - physical_id = ex.resource_id - self.set_status(context, consts.NS_ERROR, reason=str(ex), - physical_id=physical_id) - return False - - if not status: - self.set_status(context, consts.NS_ERROR, reason='Recovery failed') - return False - - params = {} - if physical_id and self.physical_id != physical_id: - self.data['recovery'] = consts.RECOVER_RECREATE - params['data'] = self.data - params['physical_id'] = physical_id - self.set_status(context, consts.NS_ACTIVE, - reason='Recovery succeeded', **params) - - return True - - def do_operation(self, context, **inputs): - """Perform an operation on a node. - - :param context: The request context. - :param dict inputs: The operation and parameters if any. - :returns: A boolean indicating whether the operation was a success. - """ - if not self.physical_id: - return False - - op = inputs['operation'] - params = inputs.get('params', {}) - self.set_status(context, consts.NS_OPERATING, - reason="Operation '%s' in progress" % op) - - try: - profile = self.rt['profile'] - method = getattr(profile, 'handle_%s' % op) - method(self, **params) - except exc.EResourceOperation as ex: - LOG.error('Node operation %s failed: %s.', op, ex) - self.set_status(context, consts.NS_ERROR, reason=str(ex)) - return False - - self.set_status(context, consts.NS_ACTIVE, - reason="Operation '%s' succeeded" % op) - return True - - def run_workflow(self, **options): - if not self.physical_id: - return False - - workflow_name = options.pop('workflow_name') - inputs = options.pop('inputs') - definition = inputs.pop('definition', None) - params = { - 'cluster_id': self.cluster_id, - 'node_id': self.physical_id, - } - params.update(inputs) - - try: - profile = self.rt['profile'] - wfc = profile.workflow(self) - workflow = wfc.workflow_find(workflow_name) - if workflow is None: - wfc.workflow_create(definition, scope="private") - else: - definition = workflow.definition - inputs_str = jsonutils.dumps(params) - wfc.execution_create(workflow_name, str(inputs_str)) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='executing', type='workflow', - id=workflow_name, - message=str(ex)) - return True diff --git a/senlin/engine/notifications/__init__.py b/senlin/engine/notifications/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/engine/notifications/base.py b/senlin/engine/notifications/base.py deleted file mode 100644 index d7607f99c..000000000 --- a/senlin/engine/notifications/base.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_log import log as logging -from senlin import objects - -LOG = logging.getLogger(__name__) - - -class Endpoints(object): - - def __init__(self, project_id, cluster_id, recover_action): - self.cluster_id = cluster_id - self.project_id = project_id - self.recover_action = recover_action - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - raise NotImplementedError - - def _check_registry_status(self, ctx, engine_id, cluster_id): - registry = objects.HealthRegistry.get_by_engine(ctx, engine_id, - cluster_id) - - if registry is None: - return False - - if registry.enabled is True: - return True - - return False diff --git a/senlin/engine/notifications/heat_endpoint.py b/senlin/engine/notifications/heat_endpoint.py deleted file mode 100644 index faabf655e..000000000 --- a/senlin/engine/notifications/heat_endpoint.py +++ /dev/null @@ -1,80 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
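Endpoint classes like the one defined above are consumed by an oslo.messaging notification listener: each endpoint's info() method is invoked for matching notifications. A hedged wiring sketch follows; the endpoint class, topic, exchange, and pool name here are illustrative placeholders rather than Senlin configuration.

    from oslo_config import cfg
    import oslo_messaging as messaging

    class PrintingEndpoint(object):
        # Hypothetical endpoint with the same info() signature the
        # Endpoints base class above exposes.
        def info(self, ctxt, publisher_id, event_type, payload, metadata):
            print('%s from %s' % (event_type, publisher_id))

    transport = messaging.get_notification_transport(cfg.CONF)
    targets = [messaging.Target(topic='notifications', exchange='heat')]
    listener = messaging.get_notification_listener(
        transport, targets, [PrintingEndpoint()], executor='threading',
        pool='demo-pool')
    listener.start()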
- -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging - -from senlin.common import context -from senlin.engine.notifications import base -from senlin import objects -from senlin.rpc import client as rpc_client - -LOG = logging.getLogger(__name__) - - -class HeatNotificationEndpoint(base.Endpoints): - - STACK_FAILURE_EVENTS = { - 'orchestration.stack.delete.end': 'DELETE', - } - - def __init__(self, project_id, cluster_id, recover_action): - super(HeatNotificationEndpoint, self).__init__( - project_id, cluster_id, recover_action - ) - self.filter_rule = messaging.NotificationFilter( - publisher_id='^orchestration.*', - event_type='^orchestration\.stack\..*', - context={'project_id': '^%s$' % project_id}) - self.rpc = rpc_client.get_engine_client() - self.target = messaging.Target( - topic=cfg.CONF.health_manager.heat_notification_topic, - exchange=cfg.CONF.health_manager.heat_control_exchange, - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - if event_type not in self.STACK_FAILURE_EVENTS: - return - - tags = payload['tags'] - if tags is None or tags == []: - return - - cluster_id = None - node_id = None - for tag in tags: - if cluster_id is None: - start = tag.find('cluster_id') - if start == 0 and tag[11:] == self.cluster_id: - cluster_id = tag[11:] - if node_id is None: - start = tag.find('cluster_node_id') - if start == 0: - node_id = tag[16:] - - if cluster_id is None or node_id is None: - return - - params = { - 'event': self.STACK_FAILURE_EVENTS[event_type], - 'state': payload.get('state', 'Unknown'), - 'stack_id': payload.get('stack_identity', 'Unknown'), - 'timestamp': metadata['timestamp'], - 'publisher': publisher_id, - 'operation': self.recover_action['operation'], - } - LOG.info("Requesting stack recovery: %s", node_id) - ctx = context.get_service_context(project_id=self.project_id, - user_id=payload['user_identity']) - req = objects.NodeRecoverRequest(identity=node_id, params=params) - self.rpc.call(ctx, 'node_recover', req) diff --git a/senlin/engine/notifications/message.py b/senlin/engine/notifications/message.py deleted file mode 100644 index 9bc9333db..000000000 --- a/senlin/engine/notifications/message.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
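The tag scan in HeatNotificationEndpoint.info() above recovers the owning cluster and node from stack tags shaped like 'cluster_id=<id>' and 'cluster_node_id=<id>' (the slice offsets 11 and 16 are the lengths of those prefixes). A self-contained sketch of the same parsing, assuming that '=' separator:

    def parse_stack_tags(tags, my_cluster):
        cluster_id = node_id = None
        for tag in tags or []:
            if tag.startswith('cluster_id=') and tag[11:] == my_cluster:
                cluster_id = tag[11:]
            elif tag.startswith('cluster_node_id='):
                node_id = tag[16:]
        return cluster_id, node_id

    assert parse_stack_tags(
        ['cluster_id=c1', 'cluster_node_id=n1'], 'c1') == ('c1', 'n1')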
- -from oslo_config import cfg -from oslo_context import context as oslo_context -from oslo_log import log as logging -import tenacity - -from senlin.common import context as senlin_context -from senlin.common import exception -from senlin.drivers import base as driver_base -from senlin.objects import credential as co - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -RETRY_ATTEMPTS = 3 -RETRY_INITIAL_DELAY = 1 -RETRY_BACKOFF = 1 -RETRY_MAX = 3 - - -class Message(object): - """Zaqar message type of notification.""" - def __init__(self, queue_name, **kwargs): - self.user = kwargs.get('user', '') - self.project = kwargs.get('project', '') - self.domain = kwargs.get('domain', '') - - self.queue_name = queue_name - - self._zaqarclient = None - self._keystoneclient = None - - def zaqar(self): - if self._zaqarclient is not None: - return self._zaqarclient - params = self._build_conn_params(self.user, self.project) - self._zaqarclient = driver_base.SenlinDriver().message(params) - return self._zaqarclient - - def _build_conn_params(self, user, project): - """Build connection params for specific user and project. - - :param user: The ID of the user for which a trust will be used. - :param project: The ID of the project for which a trust will be used. - :returns: A dict containing the required parameters for connection - creation. - """ - service_creds = senlin_context.get_service_credentials() - params = { - 'username': service_creds.get('username'), - 'password': service_creds.get('password'), - 'auth_url': service_creds.get('auth_url'), - 'user_domain_name': service_creds.get('user_domain_name'), - 'project_domain_name': service_creds.get('project_domain_name'), - 'verify': service_creds.get('verify'), - 'interface': service_creds.get('interface'), - } - - cred = co.Credential.get(oslo_context.get_current(), user, project) - if cred is None: - raise exception.TrustNotFound(trustor=user) - params['trust_id'] = cred.cred['openstack']['trust'] - - return params - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(exception.EResourceCreation), - wait=tenacity.wait_incrementing( - RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), - stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) - def post_lifecycle_hook_message(self, lifecycle_action_token, node_id, - resource_id, lifecycle_transition_type): - message_list = [{ - "ttl": CONF.notification.ttl, - "body": { - "lifecycle_action_token": lifecycle_action_token, - "node_id": node_id, - "resource_id": resource_id, - "lifecycle_transition_type": lifecycle_transition_type - } - }] - try: - if not self.zaqar().queue_exists(self.queue_name): - kwargs = { - "_max_messages_post_size": - CONF.notification.max_message_size, - "description": "Senlin lifecycle hook notification", - "name": self.queue_name - } - self.zaqar().queue_create(**kwargs) - - return self.zaqar().message_post(self.queue_name, message_list) - except exception.InternalError as ex: - raise exception.EResourceCreation( - type='queue', - message=str(ex)) diff --git a/senlin/engine/notifications/nova_endpoint.py b/senlin/engine/notifications/nova_endpoint.py deleted file mode 100644 index 2447db4af..000000000 --- a/senlin/engine/notifications/nova_endpoint.py +++ /dev/null @@ -1,88 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging - -from senlin.common import context -from senlin.engine.notifications import base -from senlin import objects -from senlin.rpc import client as rpc_client - -LOG = logging.getLogger(__name__) - - -class NovaNotificationEndpoint(base.Endpoints): - - VM_FAILURE_EVENTS = { - 'compute.instance.pause.end': 'PAUSE', - 'compute.instance.power_off.end': 'POWER_OFF', - 'compute.instance.rebuild.error': 'REBUILD', - 'compute.instance.shutdown.end': 'SHUTDOWN', - 'compute.instance.soft_delete.end': 'SOFT_DELETE', - } - - def __init__(self, project_id, cluster_id, recover_action): - super(NovaNotificationEndpoint, self).__init__( - project_id, cluster_id, recover_action - ) - self.filter_rule = messaging.NotificationFilter( - publisher_id='^compute.*', - event_type='^compute\.instance\..*', - context={'project_id': '^%s$' % project_id}) - self.rpc = rpc_client.get_engine_client() - self.target = messaging.Target( - topic=cfg.CONF.health_manager.nova_notification_topic, - exchange=cfg.CONF.health_manager.nova_control_exchange, - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - meta = payload['metadata'] - cluster_id = meta.get('cluster_id') - if not cluster_id: - return - - if self.cluster_id != cluster_id: - return - - if event_type not in self.VM_FAILURE_EVENTS: - return - - params = { - 'event': self.VM_FAILURE_EVENTS[event_type], - 'state': payload.get('state', 'Unknown'), - 'instance_id': payload.get('instance_id', 'Unknown'), - 'timestamp': metadata['timestamp'], - 'publisher': publisher_id, - 'operation': self.recover_action['operation'], - } - node_id = meta.get('cluster_node_id') - if node_id: - LOG.info("Requesting node recovery: %s", node_id) - ctx = context.get_service_context(project_id=self.project_id, - user_id=payload['user_id']) - req = objects.NodeRecoverRequest(identity=node_id, - params=params) - self.rpc.call(ctx, 'node_recover', req) - - def warn(self, ctxt, publisher_id, event_type, payload, metadata): - meta = payload.get('metadata', {}) - if meta.get('cluster_id') == self.cluster_id: - LOG.warning("publisher=%s", publisher_id) - LOG.warning("event_type=%s", event_type) - - def debug(self, ctxt, publisher_id, event_type, payload, metadata): - meta = payload.get('metadata', {}) - if meta.get('cluster_id') == self.cluster_id: - LOG.debug("publisher=%s", publisher_id) - LOG.debug("event_type=%s", event_type) diff --git a/senlin/engine/parser.py b/senlin/engine/parser.py deleted file mode 100644 index ea7715257..000000000 --- a/senlin/engine/parser.py +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_serialization import jsonutils -import urllib -import yaml - -from senlin.common.i18n import _ - - -# Try LibYAML if available -if hasattr(yaml, 'CSafeLoader'): - Loader = yaml.CSafeLoader -else: - Loader = yaml.SafeLoader - -if hasattr(yaml, 'CSafeDumper'): - Dumper = yaml.CSafeDumper -else: - Dumper = yaml.SafeDumper - - -class YamlLoader(Loader): - def normalise_file_path_to_url(self, path): - if urllib.parse.urlparse(path).scheme: - return path - path = os.path.abspath(path) - return urllib.parse.urljoin('file:', - urllib.request.pathname2url(path)) - - def include(self, node): - url = None - try: - url = self.normalise_file_path_to_url(self.construct_scalar(node)) - tmpl = urllib.request.urlopen(url).read() - return yaml.safe_load(tmpl) - except urllib.error.URLError as ex: - raise IOError('Failed retrieving file %s: %s' % (url, ex)) - - def process_unicode(self, node): - # Override the default string handling function to always return - # unicode objects - return self.construct_scalar(node) - - -YamlLoader.add_constructor('!include', YamlLoader.include) -YamlLoader.add_constructor(u'tag:yaml.org,2002:str', - YamlLoader.process_unicode) -YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp', - YamlLoader.process_unicode) - - -def simple_parse(in_str): - try: - out_dict = jsonutils.loads(in_str) - except ValueError: - try: - out_dict = yaml.load(in_str, Loader=YamlLoader) - except yaml.YAMLError as yea: - raise ValueError(_('Error parsing input: %s') % yea) - else: - if out_dict is None: - out_dict = {} - - if not isinstance(out_dict, dict): - raise ValueError(_('The input is not a JSON object or YAML mapping.')) - - return out_dict diff --git a/senlin/engine/receivers/__init__.py b/senlin/engine/receivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/engine/receivers/base.py b/senlin/engine/receivers/base.py deleted file mode 100644 index e957240bd..000000000 --- a/senlin/engine/receivers/base.py +++ /dev/null @@ -1,248 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_context import context as oslo_context -from oslo_log import log as logging -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.common import context as senlin_context -from senlin.common import exception -from senlin.common import utils -from senlin.drivers import base as driver_base -from senlin.objects import credential as co -from senlin.objects import receiver as ro - -LOG = logging.getLogger(__name__) - - -class Receiver(object): - """Create a Receiver which is used to trigger a cluster action.""" - - def __new__(cls, rtype, cluster_id=None, action=None, **kwargs): - """Create a new receiver object. - - :param rtype: Type name of receiver. - :param cluster_id: ID of the targeted cluster. - :param action: Targeted action for execution. 
- :param kwargs: A dict containing optional parameters. - :returns: An instance of a specific sub-class of Receiver. - """ - if rtype == consts.RECEIVER_WEBHOOK: - from senlin.engine.receivers import webhook - ReceiverClass = webhook.Webhook - elif rtype == consts.RECEIVER_MESSAGE: - from senlin.engine.receivers import message - ReceiverClass = message.Message - else: - ReceiverClass = Receiver - - return super(Receiver, cls).__new__(ReceiverClass) - - def __init__(self, rtype, cluster_id=None, action=None, **kwargs): - - self.id = kwargs.get('id', None) - self.name = kwargs.get('name', None) - self.type = rtype - self.user = kwargs.get('user', '') - self.project = kwargs.get('project', '') - self.domain = kwargs.get('domain', '') - - self.created_at = kwargs.get('created_at', None) - self.updated_at = kwargs.get('updated_at', None) - - self.cluster_id = cluster_id - self.action = action - self.actor = kwargs.get('actor', {}) - self.params = kwargs.get('params', {}) - self.channel = kwargs.get('channel', {}) - - def store(self, context, update=False): - """Store the receiver in database and return its ID. - - :param context: Context for DB operations. - """ - timestamp = timeutils.utcnow(True) - self.created_at = timeutils.utcnow(True) - values = { - 'id': self.id, - 'name': self.name, - 'type': self.type, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'created_at': self.created_at, - 'updated_at': self.updated_at, - 'cluster_id': self.cluster_id, - 'actor': self.actor, - 'action': self.action, - 'params': self.params, - 'channel': self.channel, - } - - if update: - self.updated_at = timestamp - values['updated_at'] = timestamp - ro.Receiver.update(context, self.id, values) - else: - self.created_at = timestamp - values['created_at'] = timestamp - receiver = ro.Receiver.create(context, values) - self.id = receiver.id - - return self.id - - @classmethod - def create(cls, context, rtype, cluster, action, **kwargs): - cdata = dict() - if rtype == consts.RECEIVER_WEBHOOK and context.is_admin: - # use object owner if request is from admin - cred = co.Credential.get(context, cluster.user, cluster.project) - trust_id = cred['cred']['openstack']['trust'] - cdata['trust_id'] = trust_id - else: - # otherwise, use context user - cdata['trust_id'] = context.trusts - - kwargs['actor'] = cdata - kwargs['user'] = context.user_id - kwargs['project'] = context.project_id - kwargs['domain'] = context.domain_id - kwargs['id'] = uuidutils.generate_uuid() - cluster_id = cluster.id if cluster else None - obj = cls(rtype, cluster_id, action, **kwargs) - obj.initialize_channel(context) - obj.store(context) - - return obj - - @classmethod - def _from_object(cls, receiver): - """Construct a receiver from receiver object. - - @param cls: The target class. - @param receiver: a receiver object that contains all fields. - """ - kwargs = { - 'id': receiver.id, - 'name': receiver.name, - 'user': receiver.user, - 'project': receiver.project, - 'domain': receiver.domain, - 'created_at': receiver.created_at, - 'updated_at': receiver.updated_at, - 'actor': receiver.actor, - 'params': receiver.params, - 'channel': receiver.channel, - } - - return cls(receiver.type, receiver.cluster_id, receiver.action, - **kwargs) - - @classmethod - def load(cls, context, receiver_id=None, receiver_obj=None, - project_safe=True): - """Retrieve a receiver from database. - - @param context: the context for db operations. - @param receiver_id: the unique ID of the receiver to retrieve. 
- @param receiver_obj: the DB object of a receiver to retrieve. - @param project_safe: Optional parameter specifying whether only - receiver belong to the context.project will be - loaded. - """ - if receiver_obj is None: - receiver_obj = ro.Receiver.get(context, receiver_id, - project_safe=project_safe) - if receiver_obj is None: - raise exception.ResourceNotFound(type='receiver', - id=receiver_id) - - return cls._from_object(receiver_obj) - - def to_dict(self): - info = { - 'id': self.id, - 'name': self.name, - 'type': self.type, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at), - 'cluster_id': self.cluster_id, - 'actor': self.actor, - 'action': self.action, - 'params': self.params, - 'channel': self.channel, - } - return info - - def initialize_channel(self, context): - return {} - - def release_channel(self, context): - return - - def notify(self, context, params=None): - return - - @classmethod - def delete(cls, context, receiver_id): - """Delete a receiver. - - @param context: the context for db operations. - @param receiver_id: the unique ID of the receiver to delete. - """ - receiver_obj = cls.load(context, receiver_id=receiver_id) - receiver_obj.release_channel(context) - ro.Receiver.delete(context, receiver_obj.id) - - return - - def _get_base_url(self): - base = None - service_cred = senlin_context.get_service_credentials() - kc = driver_base.SenlinDriver().identity(service_cred) - try: - base = kc.get_senlin_endpoint() - except exception.InternalError as ex: - LOG.warning('Senlin endpoint can not be found: %s.', ex) - - return base - - def _build_conn_params(self, user, project): - """Build connection params for specific user and project. - - :param user: The ID of the user for which a trust will be used. - :param project: The ID of the project for which a trust will be used. - :returns: A dict containing the required parameters for connection - creation. - """ - service_creds = senlin_context.get_service_credentials() - params = { - 'username': service_creds.get('username'), - 'password': service_creds.get('password'), - 'auth_url': service_creds.get('auth_url'), - 'user_domain_name': service_creds.get('user_domain_name'), - 'project_domain_name': service_creds.get('project_domain_name'), - 'verify': service_creds.get('verify'), - 'interface': service_creds.get('interface'), - } - - cred = co.Credential.get(oslo_context.get_current(), user, project) - if cred is None: - raise exception.TrustNotFound(trustor=user) - params['trust_id'] = cred.cred['openstack']['trust'] - - return params diff --git a/senlin/engine/receivers/message.py b/senlin/engine/receivers/message.py deleted file mode 100644 index e465334f9..000000000 --- a/senlin/engine/receivers/message.py +++ /dev/null @@ -1,290 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
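Receiver.__new__ above implements a small factory: the requested type name selects the concrete subclass while callers keep a single construction entry point. A generic, self-contained sketch of the same dispatch (class names here are illustrative, not Senlin's):

    class Base(object):
        def __new__(cls, rtype, **kwargs):
            impl = {'webhook': Webhook, 'message': Message}.get(rtype, Base)
            return super(Base, cls).__new__(impl)

    class Webhook(Base):
        pass

    class Message(Base):
        pass

    assert isinstance(Base('webhook'), Webhook)
    assert isinstance(Base('other'), Base)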
- -import socket - -from keystoneauth1 import loading as ks_loading -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.drivers import base as driver_base -from senlin.engine.actions import base as action_mod -from senlin.engine import dispatcher -from senlin.engine.receivers import base -from senlin.objects import cluster as cluster_obj - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -class Message(base.Receiver): - """Zaqar message type of receivers.""" - def __init__(self, rtype, cluster_id, action, **kwargs): - super(Message, self).__init__(rtype, cluster_id, action, **kwargs) - self._zaqarclient = None - self._keystoneclient = None - - def zaqar(self): - if self._zaqarclient is not None: - return self._zaqarclient - params = self._build_conn_params(self.user, self.project) - self._zaqarclient = driver_base.SenlinDriver().message(params) - return self._zaqarclient - - def keystone(self): - if self._keystoneclient is not None: - return self._keystoneclient - params = self._build_conn_params(self.user, self.project) - self._keystoneclient = driver_base.SenlinDriver().identity(params) - return self._keystoneclient - - def _generate_subscriber_url(self): - host = CONF.receiver.host - port = CONF.receiver.port - base = None - - if not host: - # Try to get base url by querying senlin endpoint if host - # is not provided in configuration file - base = self._get_base_url() - if not base: - LOG.warning('Receiver notification host is not specified in ' - 'configuration file and Senlin service ' - 'endpoint can not be found, using local' - ' hostname (%(host)s) for subscriber url.', - {'host': host}) - host = socket.gethostname() - - if not base: - base = "http://%(h)s:%(p)s/v1" % {'h': host, 'p': port} - sub_url = "/receivers/%(id)s/notify" % {'id': self.id} - - return "".join(["trust+", base, sub_url]) - - def _build_trust(self): - # Get zaqar trustee user ID for trust building - auth = ks_loading.load_auth_from_conf_options(CONF, 'zaqar') - session = ks_loading.load_session_from_conf_options(CONF, 'zaqar') - zaqar_trustee_user_id = session.get_user_id(auth=auth) - try: - trust = self.keystone().trust_get_by_trustor(self.user, - zaqar_trustee_user_id, - self.project) - if not trust: - # Create a trust if no existing one found - roles = self.notifier_roles - for role in roles: - # Remove 'admin' role from delegated roles list - # unless it is the only role user has - if role == 'admin' and len(roles) > 1: - roles.remove(role) - trust = self.keystone().trust_create(self.user, - zaqar_trustee_user_id, - self.project, - roles) - except exc.InternalError as ex: - LOG.error('Can not build trust between user %(user)s and zaqar ' - 'service user %(zaqar)s for receiver %(receiver)s.', - { - 'user': self.user, - 'zaqar': zaqar_trustee_user_id, - 'receiver': self.id - }) - raise exc.EResourceCreation(type='trust', - message=str(ex)) - return trust.id - - def _create_queue(self): - queue_name = "senlin-receiver-%s" % self.id - kwargs = { - "_max_messages_post_size": CONF.receiver.max_message_size, - "description": "Senlin receiver %s." 
% self.id,
-            "name": queue_name
-        }
-        try:
-            self.zaqar().queue_create(**kwargs)
-        except exc.InternalError as ex:
-            raise exc.EResourceCreation(type='queue',
-                                        message=str(ex))
-
-        return queue_name
-
-    def _create_subscription(self, queue_name):
-        subscriber = self._generate_subscriber_url()
-        trust_id = self._build_trust()
-
-        # FIXME(Yanyanhu): Since Zaqar does not support creating a
-        # subscription that never expires, we specify a very large ttl
-        # value that does not exceed Python's maximum.
-        kwargs = {
-            "ttl": 2 ** 36,
-            "subscriber": subscriber,
-            "options": {
-                "trust_id": trust_id
-            }
-        }
-        try:
-            subscription = self.zaqar().subscription_create(queue_name,
-                                                            **kwargs)
-        except exc.InternalError as ex:
-            raise exc.EResourceCreation(type='subscription',
-                                        message=str(ex))
-        return subscription
-
-    def _find_cluster(self, context, identity):
-        """Find a cluster with the given identity."""
-        if uuidutils.is_uuid_like(identity):
-            cluster = cluster_obj.Cluster.get(context, identity)
-            if not cluster:
-                cluster = cluster_obj.Cluster.get_by_name(context, identity)
-        else:
-            cluster = cluster_obj.Cluster.get_by_name(context, identity)
-            # maybe it is a short form of UUID
-            if not cluster:
-                cluster = cluster_obj.Cluster.get_by_short_id(context,
-                                                              identity)
-
-        if not cluster:
-            raise exc.ResourceNotFound(type='cluster', id=identity)
-
-        return cluster
-
-    def _build_action(self, context, message):
-        body = message.get('body', None)
-        if not body:
-            msg = _('Message body is empty.')
-            raise exc.InternalError(message=msg)
-
-        # Message format check
-        cluster = body.get('cluster', None)
-        action = body.get('action', None)
-        params = body.get('params', {})
-        if not cluster or not action:
-            msg = _('Both cluster identity and action must be specified.')
-            raise exc.InternalError(message=msg)
-
-        # Cluster existence check
-        # TODO(YanyanHu): Or maybe we can relax this constraint to allow
-        # user to trigger CLUSTER_CREATE action by sending message?
-        try:
-            cluster_obj = self._find_cluster(context, cluster)
-        except exc.ResourceNotFound:
-            msg = _('Cluster (%(cid)s) cannot be found.'
- ) % {'cid': cluster} - raise exc.InternalError(message=msg) - - # Permission check - if not context.is_admin and self.user != cluster_obj.user: - msg = _('%(user)s is not allowed to trigger actions on ' - 'cluster %(cid)s.') % {'user': self.user, - 'cid': cluster} - raise exc.InternalError(message=msg) - - # Use receiver owner context to build action - context.user_id = self.user - context.project_id = self.project - context.domain_id = self.domain - - # Action name check - if action not in consts.CLUSTER_ACTION_NAMES: - msg = _("Illegal cluster action '%s' specified.") % action - raise exc.InternalError(message=msg) - - kwargs = { - 'name': 'receiver_%s_%s' % (self.id[:8], message['id'][:8]), - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': params - } - action_id = action_mod.Action.create(context, cluster_obj.id, - action, **kwargs) - - return action_id - - def initialize_channel(self, context): - self.notifier_roles = context.roles - queue_name = self._create_queue() - subscription = self._create_subscription(queue_name) - - self.channel = { - 'queue_name': queue_name, - 'subscription': subscription.subscription_id - } - return self.channel - - def release_channel(self, context): - queue_name = self.channel['queue_name'] - subscription = self.channel['subscription'] - - # Delete subscription on zaqar queue - try: - self.zaqar().subscription_delete(queue_name, subscription) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='subscription', - id='subscription', - message=str(ex)) - # Delete zaqar queue - try: - self.zaqar().queue_delete(queue_name) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='queue', - id='queue_name', - message=str(ex)) - - def notify(self, context, params=None): - queue_name = self.channel['queue_name'] - # Claim message(s) from queue - # TODO(Yanyanhu) carefully handling claim ttl to avoid - # potential race condition. - try: - claim = self.zaqar().claim_create(queue_name) - messages = claim.messages - except exc.InternalError as ex: - LOG.error('Failed in claiming message: %s', ex) - return - - # Build actions - actions = [] - if messages: - for message in messages: - try: - action_id = self._build_action(context, message) - actions.append(action_id) - except exc.InternalError as ex: - LOG.error('Failed in building action: %s', ex) - try: - self.zaqar().message_delete(queue_name, message['id'], - claim.id) - except exc.InternalError as ex: - LOG.error('Failed in deleting message %(id)s: %(reason)s', - {'id': message['id'], 'reason': ex}) - - self.zaqar().claim_delete(queue_name, claim.id) - LOG.info('Actions %(actions)s were successfully built.', - {'actions': actions}) - - dispatcher.start_action() - - return actions - - def to_dict(self): - message = super(Message, self).to_dict() - # Pop subscription from channel info since it - # should be invisible for user. - message['channel'].pop('subscription') - - return message diff --git a/senlin/engine/receivers/webhook.py b/senlin/engine/receivers/webhook.py deleted file mode 100644 index d2e92800a..000000000 --- a/senlin/engine/receivers/webhook.py +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import socket
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from urllib import parse
-
-from senlin.engine.receivers import base
-
-LOG = logging.getLogger(__name__)
-
-CONF = cfg.CONF
-
-
-class Webhook(base.Receiver):
-    """Webhook flavor of receivers."""
-    WEBHOOK_VERSION = 2
-
-    def initialize_channel(self, context):
-        host = CONF.receiver.host
-        port = CONF.receiver.port
-        base = None
-
-        if not host:
-            # Try to get base url by querying senlin endpoint if host
-            # is not provided in configuration file
-            base = self._get_base_url()
-            if not base:
-                host = socket.gethostname()
-                LOG.warning(
-                    'Webhook host is not specified in configuration '
-                    'file and Senlin service endpoint can not be found, '
-                    'using local hostname (%(host)s) for webhook url.',
-                    {'host': host})
-            elif base.rfind("v1") == -1:
-                base = "%s/v1" % base
-
-        if not base:
-            base = "http://%(h)s:%(p)s/v1" % {'h': host, 'p': port}
-        webhook = "/webhooks/%(id)s/trigger" % {'id': self.id}
-
-        if self.params:
-            normalized = sorted(self.params.items(), key=lambda d: d[0])
-            qstr = parse.urlencode(normalized)
-            url = "".join(
-                [base, webhook, '?V={}&'.format(self.WEBHOOK_VERSION), qstr])
-        else:
-            url = "".join(
-                [base, webhook, '?V={}'.format(self.WEBHOOK_VERSION)])
-
-        self.channel = {
-            'alarm_url': url
-        }
-        return self.channel
diff --git a/senlin/engine/registry.py b/senlin/engine/registry.py
deleted file mode 100644
index 50aec98d3..000000000
--- a/senlin/engine/registry.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import itertools
-
-from oslo_log import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-class PluginInfo(object):
-    """Base mapping of plugin type to implementation."""
-
-    def __new__(cls, registry, name, plugin, **kwargs):
-        """Create a new PluginInfo of the appropriate class.
-
-        Placeholder for class hierarchy extensibility.
-        """
-        return super(PluginInfo, cls).__new__(cls)
-
-    def __init__(self, registry, name, plugin):
-        self.registry = registry
-        self.name = name
-        self.plugin = plugin
-        self.user_provided = True
-
-    def __eq__(self, other):
-        if other is None:
-            return False
-        return (self.name == other.name and
-                self.plugin == other.plugin and
-                self.user_provided == other.user_provided)
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __lt__(self, other):
-        if self.user_provided != other.user_provided:
-            # user provided ones must be sorted above system ones.
- return self.user_provided > other.user_provided - if len(self.name) != len(other.name): - # more specific (longer) name must be sorted above system ones. - return len(self.name) > len(other.name) - return self.name < other.name - - def __gt__(self, other): - return other.__lt__(self) - - def __str__(self): - return '[Plugin](User:%s) %s -> %s' % (self.user_provided, - self.name, str(self.plugin)) - - -class Registry(object): - """A registry for managing profile or policy classes.""" - - def __init__(self, registry_name, global_registry=None): - self.registry_name = registry_name - self._registry = {} - self.is_global = False if global_registry else True - self.global_registry = global_registry - - def _register_info(self, name, info): - """place the new info in the correct location in the registry. - - :param name: a string of plugin name. - :param info: reference to a PluginInfo data structure, deregister a - PluginInfo if specified as None. - """ - registry = self._registry - if info is None: - # delete this entry. - msg = "Removing %(item)s from registry" - LOG.warning(msg, {'item': name}) - registry.pop(name, None) - return - - if name in registry and isinstance(registry[name], PluginInfo): - if registry[name] == info: - return - details = { - 'name': name, - 'old': str(registry[name].plugin), - 'new': str(info.plugin) - } - LOG.warning('Changing %(name)s from %(old)s to %(new)s', - details) - else: - msg = 'Registering %(name)s -> %(value)s' - LOG.info(msg, {'name': name, 'value': info.plugin}) - - info.user_provided = not self.is_global - registry[name] = info - - def register_plugin(self, name, plugin): - pi = PluginInfo(self, name, plugin) - self._register_info(name, pi) - - def load(self, json_snippet): - for k, v in iter(json_snippet.items()): - if v is None: - self._register_info(k, None) - else: - self.register_plugin(k, v) - - def iterable_by(self, name): - plugin = self._registry.get(name) - if plugin: - yield plugin - - def get_plugin(self, name): - giter = [] - if not self.is_global: - giter = self.global_registry.iterable_by(name) - - matches = itertools.chain(self.iterable_by(name), giter) - infos = sorted(matches) - return infos[0].plugin if infos else None - - def as_dict(self): - return dict((k, v.plugin) for k, v in self._registry.items()) - - def get_types(self): - """Return a list of valid plugin types.""" - types_support = [] - for tn, ts in self._registry.items(): - name = tn.split('-')[0] if '-' in tn else tn - version = tn.split('-')[1] if '-' in tn else '' - support = ts.plugin.VERSIONS[version] if version != '' else '' - pi = {version: support} - types_support.append({'name': name, 'version': version, - 'support_status': pi}) - return types_support diff --git a/senlin/engine/senlin_lock.py b/senlin/engine/senlin_lock.py deleted file mode 100644 index 1d2394bee..000000000 --- a/senlin/engine/senlin_lock.py +++ /dev/null @@ -1,158 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
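A short usage sketch for the Registry above, assuming the Registry class is importable: plugins registered on a user registry shadow same-named entries in the global registry, because user-provided PluginInfo objects sort first in get_plugin().

    global_reg = Registry('profiles')
    global_reg.register_plugin('os.nova.server-1.0', object)

    user_reg = Registry('profiles', global_registry=global_reg)
    # Falls through to the global registry:
    assert user_reg.get_plugin('os.nova.server-1.0') is object

    class MyServer(object):
        pass

    # A user-provided plugin now wins over the global one:
    user_reg.register_plugin('os.nova.server-1.0', MyServer)
    assert user_reg.get_plugin('os.nova.server-1.0') is MyServer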
- -import eventlet -import random -import time - -from oslo_config import cfg -from oslo_db import exception -from oslo_log import log as logging - -from senlin.common.i18n import _ -from senlin.common import utils -from senlin import objects -from senlin.objects import action as ao -from senlin.objects import cluster_lock as cl_obj -from senlin.objects import node_lock as nl_obj - -CONF = cfg.CONF - -CONF.import_opt('lock_retry_times', 'senlin.conf') -CONF.import_opt('lock_retry_interval', 'senlin.conf') - -LOG = logging.getLogger(__name__) - -LOCK_SCOPES = ( - CLUSTER_SCOPE, NODE_SCOPE, -) = ( - -1, 1, -) - - -def cluster_lock_acquire(context, cluster_id, action_id, engine=None, - scope=CLUSTER_SCOPE, forced=False): - """Try to lock the specified cluster. - - :param context: the context used for DB operations. - :param cluster_id: ID of the cluster to be locked. - :param action_id: ID of the action which wants to lock the cluster. - :param engine: ID of the engine which wants to lock the cluster. - :param scope: scope of lock, could be cluster wide lock, or node-wide - lock. - :param forced: set to True to cancel current action that owns the lock, - if any. - :returns: True if lock is acquired, or False otherwise. - """ - - # Step 1: try lock the cluster - if the returned owner_id is the - # action id, it was a success - for retries in range(3): - try: - owners = cl_obj.ClusterLock.acquire(cluster_id, action_id, scope) - if action_id in owners: - return True - except exception.DBDuplicateEntry: - LOG.info('Duplicate entry in cluster_lock table for %(c)s. ' - 'Retrying cluster lock.', - {'c': cluster_id}) - eventlet.sleep(random.randrange(1, 3)) - - # Step 2: Last resort is 'forced locking', only needed when retry failed - if forced: - owners = cl_obj.ClusterLock.steal(cluster_id, action_id) - return action_id in owners - - # Step 3: check if the owner is a dead engine, if so, steal the lock. - # Will reach here only because scope == CLUSTER_SCOPE - action = ao.Action.get(context, owners[0]) - if (action and action.owner and action.owner != engine and - utils.is_service_dead(context, action.owner)): - LOG.info('The cluster %(c)s is locked by dead action %(a)s, ' - 'try to steal the lock.', - {'c': cluster_id, 'a': owners[0]}) - dead_engine = action.owner - owners = cl_obj.ClusterLock.steal(cluster_id, action_id) - # Cleanse locks affected by the dead engine - objects.Service.gc_by_engine(dead_engine) - return action_id in owners - - lock_owners = [] - for o in owners: - lock_owners.append(o[:8]) - LOG.warning('Cluster is already locked by action %(old)s, ' - 'action %(new)s failed grabbing the lock', - {'old': str(lock_owners), 'new': action_id[:8]}) - - return False - - -def cluster_lock_release(cluster_id, action_id, scope): - """Release the lock on the specified cluster. - - :param cluster_id: ID of the cluster to be released. - :param action_id: ID of the action that attempts to release the cluster. - :param scope: The scope of the lock to be released. - """ - return cl_obj.ClusterLock.release(cluster_id, action_id, scope) - - -def node_lock_acquire(context, node_id, action_id, engine=None, - forced=False): - """Try to lock the specified node. - - :param context: the context used for DB operations. - :param node_id: ID of the node to be locked. - :param action_id: ID of the action that attempts to lock the node. - :param engine: ID of the engine that attempts to lock the node. - :param forced: set to True to cancel current action that owns the lock, - if any. 
-    :returns: True if lock is acquired, or False otherwise.
-    """
-    # Step 1: try to lock the node - if the returned owner_id is the
-    # action id, it was a success
-    owner = nl_obj.NodeLock.acquire(node_id, action_id)
-    if action_id == owner:
-        return True
-
-    # Step 2: Last resort is 'forced locking', only needed when the attempt
-    # above failed
-    if forced:
-        owner = nl_obj.NodeLock.steal(node_id, action_id)
-        return action_id == owner
-
-    # Step 3: Try to steal the lock if its owner is a dead engine, i.e. the
-    # node is locked by an action owned by an engine that is no longer alive.
-    action = ao.Action.get(context, owner)
-    if (action and action.owner and action.owner != engine and
-            utils.is_service_dead(context, action.owner)):
-        LOG.info('The node %(n)s is locked by dead action %(a)s, '
-                 'try to steal the lock.',
-                 {'n': node_id, 'a': owner})
-        reason = _('Engine died when executing this action.')
-        nl_obj.NodeLock.steal(node_id, action_id)
-        ao.Action.mark_failed(context, action.id, time.time(), reason)
-        return True
-
-    LOG.warning('Node is already locked by action %(old)s, '
-                'action %(new)s failed grabbing the lock',
-                {'old': owner, 'new': action_id})
-
-    return False
-
-
-def node_lock_release(node_id, action_id):
-    """Release the lock on the specified node.
-
-    :param node_id: ID of the node to be released.
-    :param action_id: ID of the action that attempts to release the node.
-    """
-    return nl_obj.NodeLock.release(node_id, action_id)
diff --git a/senlin/engine/service.py b/senlin/engine/service.py
deleted file mode 100644
index 837c7d070..000000000
--- a/senlin/engine/service.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import time
-
-import eventlet
-from oslo_config import cfg
-from oslo_context import context as oslo_context
-from oslo_log import log as logging
-import oslo_messaging
-from osprofiler import profiler
-
-from senlin.common import consts
-from senlin.common import context
-from senlin.common import messaging
-from senlin.common import service
-from senlin.engine.actions import base as action_mod
-from senlin.engine import event as EVENT
-from senlin.objects import action as ao
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-wallclock = time.time
-
-
-@profiler.trace_cls("rpc")
-class EngineService(service.Service):
-    """RPC server for dispatching actions.
-
-    Receive notifications from dispatcher services and schedule actions.
-    """
-
-    def __init__(self, host, topic):
-        super(EngineService, self).__init__(
-            self.service_name, host, topic,
-            threads=CONF.engine.threads
-        )
-        self.version = consts.RPC_API_VERSION
-
-        self.target = None
-
-        # TODO(Yanyan Hu): Build a DB session with full privilege
-        # for DB accessing in scheduler module
-        self.db_session = context.RequestContext(is_admin=True)
-
-        # Initialize the global environment
-        EVENT.load_dispatcher()
-
-    @property
-    def service_name(self):
-        return 'senlin-engine'
-
-    def start(self):
-        """Start the engine.
-
-        Note that since the engine is an internal server, we are not using
-        versioned objects for parameter passing.
- """ - super(EngineService, self).start() - - self.target = oslo_messaging.Target(server=self.service_id, - topic=self.topic, - version=self.version) - - self.server = messaging.get_rpc_server(self.target, self) - self.server.start() - - def stop(self, graceful=False): - if self.server: - self.server.stop() - self.server.wait() - super(EngineService, self).stop(graceful) - - def execute(self, func, *args, **kwargs): - """Run the given method in a thread.""" - req_cnxt = oslo_context.get_current() - self.tg.add_thread( - self._start_with_trace, req_cnxt, - self._serialize_profile_info(), - func, *args, **kwargs - ) - - def _serialize_profile_info(self): - prof = profiler.get() - trace_info = None - if prof: - trace_info = { - "hmac_key": prof.hmac_key, - "base_id": prof.get_base_id(), - "parent_id": prof.get_id() - } - return trace_info - - def _start_with_trace(self, cnxt, trace, func, *args, **kwargs): - if trace: - profiler.init(**trace) - if cnxt is not None: - cnxt.update_store() - return func(*args, **kwargs) - - def listening(self, ctxt): - """Respond affirmatively to confirm that engine is still alive.""" - return True - - def start_action(self, ctxt, action_id=None): - """Run action(s) in sub-thread(s). - - :param action_id: ID of the action to be executed. None means all - ready actions will be acquired and scheduled to run. - """ - actions_launched = 0 - max_batch_size = cfg.CONF.max_actions_per_batch - batch_interval = cfg.CONF.batch_interval - - if action_id is not None: - timestamp = wallclock() - action = ao.Action.acquire(self.db_session, action_id, - self.service_id, - timestamp) - if action: - self.execute(action_mod.ActionProc, self.db_session, action.id) - actions_launched += 1 - - while True: - timestamp = wallclock() - action = ao.Action.acquire_first_ready(self.db_session, - self.service_id, - timestamp) - if not action: - break - - if max_batch_size == 0 or 'NODE' not in action.action: - self.execute(action_mod.ActionProc, self.db_session, action.id) - continue - - if max_batch_size > actions_launched: - self.execute(action_mod.ActionProc, self.db_session, action.id) - actions_launched += 1 - continue - - self.execute(action_mod.ActionProc, self.db_session, action.id) - - LOG.debug( - 'Engine %(id)s has launched %(num)s node actions ' - 'consecutively, stop scheduling node action for ' - '%(interval)s second...', - { - 'id': self.service_id, - 'num': max_batch_size, - 'interval': batch_interval - }) - - sleep(batch_interval) - actions_launched = 1 - - def cancel_action(self, ctxt, action_id): - """Cancel an action execution progress.""" - action = action_mod.Action.load(self.db_session, action_id, - project_safe=False) - action.signal(action.SIG_CANCEL) - - def suspend_action(self, ctxt, action_id): - """Suspend an action execution progress.""" - action = action_mod.Action.load(self.db_session, action_id, - project_safe=False) - action.signal(action.SIG_SUSPEND) - - def resume_action(self, ctxt, action_id): - """Resume an action execution progress.""" - action = action_mod.Action.load(self.db_session, action_id, - project_safe=False) - action.signal(action.SIG_RESUME) - - -def sleep(sleep_time): - """Interface for sleeping.""" - - eventlet.sleep(sleep_time) diff --git a/senlin/events/__init__.py b/senlin/events/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/events/base.py b/senlin/events/base.py deleted file mode 100644 index 26dd25983..000000000 --- a/senlin/events/base.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the 
Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import reflection - - -class EventBackend(object): - - @classmethod - def _check_entity(cls, e): - e_type = reflection.get_class_name(e, fully_qualified=False) - return e_type.upper() - - @classmethod - def _get_action_name(cls, action): - """Get action name by inference. - - :param action: An action object. - :returns: A string containing the inferred action name. - """ - name = action.action.split('_', 1) - if len(name) == 1: - return name[0].lower() - - name = name[1].lower() - if name == "operation": - name = action.inputs.get("operation", name) - return name - - @classmethod - def dump(cls, level, action, **kwargs): - """A method for sub-class to override. - - :param level: An integer as defined by python logging module. - :param action: The action that triggered this dump. - :param dict kwargs: Additional parameters such as ``phase``, - ``timestamp`` or ``extra``. - :returns: None - """ - raise NotImplementedError diff --git a/senlin/events/database.py b/senlin/events/database.py deleted file mode 100644 index bd3412309..000000000 --- a/senlin/events/database.py +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils - -from senlin.common import consts -from senlin.events import base -from senlin.objects import event as eo - - -class DBEvent(base.EventBackend): - """DB driver for event dumping""" - - @classmethod - def dump(cls, level, action, **kwargs): - """Create an event record into database. - - :param level: An integer as defined by python logging module. - :param action: The action that triggered this dump. - :param dict kwargs: Additional parameters such as ``phase``, - ``timestamp`` or ``extra``. 
- """ - ctx = action.context - entity = action.entity - status = kwargs.get('phase') or entity.status - reason = kwargs.get('reason') or entity.status_reason - otype = cls._check_entity(entity) - cluster_id = entity.id if otype == 'CLUSTER' else entity.cluster_id - # use provided timestamp if any - timestamp = kwargs.get('timestamp') or timeutils.utcnow(True) - # use provided extra data if any - extra = kwargs.get("extra") or {} - - # Make a guess over the action name - action_name = action.action - if action_name in (consts.NODE_OPERATION, consts.CLUSTER_OPERATION): - action_name = action.inputs.get('operation', action_name) - - values = { - 'level': level, - 'timestamp': timestamp, - 'oid': entity.id, - 'otype': otype, - 'oname': entity.name, - 'cluster_id': cluster_id, - 'user': ctx.user_id, - 'project': ctx.project_id, - 'action': action_name, - 'status': status, - 'status_reason': reason, - 'meta_data': extra, - } - - eo.Event.create(ctx, values) diff --git a/senlin/events/message.py b/senlin/events/message.py deleted file mode 100644 index 352422b9e..000000000 --- a/senlin/events/message.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from senlin.common import utils -from senlin.events import base -from senlin.objects import notification as nobj - - -class MessageEvent(base.EventBackend): - """Message driver for event dumping""" - - @classmethod - def _notify_cluster_action(cls, ctx, level, cluster, action, **kwargs): - action_name = cls._get_action_name(action) - priority = utils.level_from_number(level).lower() - publisher = nobj.NotificationPublisher( - host=cfg.CONF.host, binary='senlin-engine') - publisher.obj_set_defaults() - phase = kwargs.get('phase') - event_type = nobj.EventType( - object='cluster', action=action_name, phase=phase) - payload = nobj.ClusterActionPayload(cluster, action) - notification = nobj.ClusterActionNotification( - context=ctx, priority=priority, publisher=publisher, - event_type=event_type, payload=payload) - notification.emit(ctx) - - @classmethod - def _notify_node_action(cls, ctx, level, node, action, **kwargs): - action_name = cls._get_action_name(action) - priority = utils.level_from_number(level).lower() - publisher = nobj.NotificationPublisher( - host=cfg.CONF.host, binary='senlin-engine') - publisher.obj_set_defaults() - phase = kwargs.get('phase') - event_type = nobj.EventType( - object='node', action=action_name, phase=phase) - payload = nobj.NodeActionPayload(node, action) - notification = nobj.NodeActionNotification( - context=ctx, priority=priority, publisher=publisher, - event_type=event_type, payload=payload) - notification.emit(ctx) - - @classmethod - def dump(cls, level, action, **kwargs): - """Dump the provided event into message queue. - - :param level: An integer as defined by python logging module. - :param action: An action object for the current operation. - :param dict kwargs: Other keyword arguments for the operation. 
- """ - ctx = action.context - entity = action.entity - etype = cls._check_entity(entity) - if etype == 'CLUSTER': - cls._notify_cluster_action(ctx, level, entity, action, **kwargs) - else: - cls._notify_node_action(ctx, level, entity, action, **kwargs) diff --git a/senlin/hacking/__init__.py b/senlin/hacking/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/hacking/checks.py b/senlin/hacking/checks.py deleted file mode 100644 index 1ecd1553e..000000000 --- a/senlin/hacking/checks.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -from hacking import core - -asse_equal_end_with_none_re = re.compile(r"assertEqual\(.*?,\s+None\)$") -asse_equal_start_with_none_re = re.compile(r"assertEqual\(None,") -asse_equal_start_with_true_re = re.compile(r"assertEqual\(True,") -asse_equal_end_with_true_re = re.compile(r"assertEqual\(.*?,\s+True\)$") -mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") -api_version_dec = re.compile(r"@.*api_version") -decorator_re = re.compile(r"@.*") - - -@core.flake8ext -def assert_equal_none(logical_line): - """Check for assertEqual(A, None) or assertEqual(None, A) sentences - - S318 - """ - res = (asse_equal_start_with_none_re.search(logical_line) or - asse_equal_end_with_none_re.search(logical_line)) - if res: - yield (0, "S318: assertEqual(A, None) or assertEqual(None, A) " - "sentences not allowed") - - -@core.flake8ext -def use_jsonutils(logical_line, filename): - msg = "S319: jsonutils.%(fun)s must be used instead of json.%(fun)s" - - if "json." in logical_line: - json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] - for f in json_funcs: - pos = logical_line.find('json.%s' % f) - if pos != -1: - yield (pos, msg % {'fun': f[:-1]}) - - -@core.flake8ext -def no_mutable_default_args(logical_line): - msg = "S320: Method's default argument shouldn't be mutable!" 
-    if mutable_default_args.match(logical_line):
-        yield (0, msg)
-
-
-@core.flake8ext
-def no_log_warn(logical_line):
-    """Disallow 'LOG.warn('
-
-    LOG.warn() is deprecated; use LOG.warning() instead.
-    https://bugs.launchpad.net/senlin/+bug/1508442
-
-    S322
-    """
-
-    msg = ("S322: LOG.warn is deprecated, please use LOG.warning!")
-    if "LOG.warn(" in logical_line:
-        yield (0, msg)
-
-
-@core.flake8ext
-def assert_equal_true(logical_line):
-    """Check for assertEqual(A, True) or assertEqual(True, A) sentences
-
-    S323
-    """
-    res = (asse_equal_start_with_true_re.search(logical_line) or
-           asse_equal_end_with_true_re.search(logical_line))
-    if res:
-        yield (0, "S323: assertEqual(A, True) or assertEqual(True, A) "
-               "sentences not allowed")
-
-
-@core.flake8ext
-def check_api_version_decorator(logical_line, previous_logical, blank_before,
-                                filename):
-    msg = ("S321: The api_version decorator must be the first decorator on "
-           "a method.")
-    if (blank_before == 0 and re.match(api_version_dec, logical_line) and
-            re.match(decorator_re, previous_logical)):
-        yield (0, msg)
diff --git a/senlin/health_manager/__init__.py b/senlin/health_manager/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/senlin/health_manager/service.py b/senlin/health_manager/service.py
deleted file mode 100644
index a768d111e..000000000
--- a/senlin/health_manager/service.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
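# A standalone illustration of how hacking-style checks like the ones above
# are exercised: each @core.flake8ext function receives a logical line and
# yields (offset, message) tuples for violations. This sketch drives one of
# the regex checks directly, without the flake8 plugin machinery.

import re

mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")


def no_mutable_default_args(logical_line):
    if mutable_default_args.match(logical_line):
        yield (0, "S320: Method's default argument shouldn't be mutable!")


for line in ["def f(a, b={}):", "def g(a, b=None):"]:
    for offset, message in no_mutable_default_args(line):
        print(line, "->", message)  # only the first line is flagged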
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-from oslo_utils import timeutils
-from osprofiler import profiler
-
-from senlin.common import consts
-from senlin.common import context
-from senlin.common import messaging as rpc
-from senlin.common import service
-from senlin.engine import health_manager
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-@profiler.trace_cls("rpc")
-class HealthManagerService(service.Service):
-    def __init__(self, host, topic):
-        super(HealthManagerService, self).__init__(
-            self.service_name, host, topic,
-            threads=CONF.health_manager.threads
-        )
-        self.version = consts.RPC_API_VERSION
-
-        self.ctx = context.get_admin_context()
-
-        # The following are initialized here and will be assigned in start()
-        # which happens after the fork when spawning multiple worker processes
-        self.health_registry = None
-        self.target = None
-        self.cleanup_task_timer = None
-
-    @property
-    def service_name(self):
-        return 'senlin-health-manager'
-
-    def start(self):
-        super(HealthManagerService, self).start()
-
-        self.health_registry = health_manager.RuntimeHealthRegistry(
-            ctx=self.ctx, engine_id=self.service_id,
-            thread_group=self.tg
-        )
-        self.target = oslo_messaging.Target(server=self.service_id,
-                                            topic=self.topic,
-                                            version=self.version)
-        self.server = rpc.get_rpc_server(self.target, self)
-        self.server.start()
-
-        self.tg.add_dynamic_timer(self.task, None, cfg.CONF.periodic_interval)
-        self.cleanup_task_timer = self.tg.add_timer(
-            CONF.health_manager.cleanup_interval, self.cleanup_task,
-            initial_delay=CONF.health_manager.cleanup_interval
-        )
-
-    def stop(self, graceful=False):
-        if self.cleanup_task_timer:
-            self.cleanup_task_timer.stop()
-            self.cleanup_task_timer = None
-        if self.server:
-            self.server.stop()
-            self.server.wait()
-        super(HealthManagerService, self).stop(graceful)
-
-    def task(self):
-        """Task that is queued on the health manager thread group.
-
-        The task is here so that the service always has something to wait()
-        on, or else the process will exit.
-        """
-        start_time = timeutils.utcnow(True)
-
-        try:
-            self.health_registry.load_runtime_registry()
-        except Exception as ex:
-            LOG.error("Failed when loading runtime for health manager: %s", ex)
-        return health_manager.chase_up(
-            start_time, cfg.CONF.periodic_interval, name='Health manager task'
-        )
-
-    def cleanup_task(self):
-        LOG.debug('Running cleanup task')
-        try:
-            self.health_registry.cleanup_orphaned_healthchecks()
-        except Exception as ex:
-            LOG.error("Failed to run cleanup tasks for health manager: %s", ex)
-
-    def listening(self, ctx):
-        """Respond to confirm that the rpc service is still alive."""
-        return True
-
-    def register_cluster(self, ctx, cluster_id, interval=None,
-                         node_update_timeout=None, params=None,
-                         enabled=True):
-        """Register a cluster for health checking.
-
-        :param ctx: The context of notify request.
-        :param cluster_id: The ID of the cluster to be registered.
-        :param interval: Interval of the health check.
-        :param node_update_timeout: Time to wait before declaring a node
-                                    unhealthy.
-        :param params: Params to be passed to the health check.
-        :param enabled: Whether the health check is enabled or disabled.
- :return: None - """ - LOG.info("Registering health check for cluster %s.", cluster_id) - self.health_registry.register_cluster( - cluster_id=cluster_id, - interval=interval, - node_update_timeout=node_update_timeout, - params=params, - enabled=enabled) - - def unregister_cluster(self, ctx, cluster_id): - """Unregister a cluster from health checking. - - :param ctx: The context of notify request. - :param cluster_id: The ID of the cluster to be unregistered. - :return: None - """ - LOG.info("Unregistering health check for cluster %s.", cluster_id) - self.health_registry.unregister_cluster(cluster_id) - - def enable_cluster(self, ctx, cluster_id, params=None): - self.health_registry.enable_cluster(cluster_id) - - def disable_cluster(self, ctx, cluster_id, params=None): - self.health_registry.disable_cluster(cluster_id) diff --git a/senlin/locale/de/LC_MESSAGES/senlin.po b/senlin/locale/de/LC_MESSAGES/senlin.po deleted file mode 100644 index 9193b8285..000000000 --- a/senlin/locale/de/LC_MESSAGES/senlin.po +++ /dev/null @@ -1,1897 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Frank Kloeker , 2018. #zanata -# Andreas Jaeger , 2020. #zanata -msgid "" -msgstr "" -"Project-Id-Version: senlin VERSION\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2022-05-24 19:41+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2020-04-25 11:33+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 4.3.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "\"%s\" is not a List" -msgstr "'%s' ist keine Liste" - -#, python-format -msgid "%(feature)s is not supported." -msgstr "%(feature)s wird nicht unterstützt." - -#, python-format -msgid "" -"%(key)s (max_version=%(max)s) is not supported by spec version %(version)s." -msgstr "" -"%(key)s (max_version=%(max)s) wird von der Spezifikationsversion %(version)s " -"nicht unterstützt." - -#, python-format -msgid "" -"%(key)s (min_version=%(min)s) is not supported by spec version %(version)s." -msgstr "" -"%(key)s (min_version=%(min)s) wird von der Spezifikationsversion %(version)s " -"nicht unterstützt." - -#, python-format -msgid "%(message)s" -msgstr "%(message)s" - -#, python-format -msgid "%(msg)s." -msgstr "%(msg)s." - -#, python-format -msgid "%(user)s is not allowed to trigger actions on cluster %(cid)s." -msgstr "%(user)s darf keine Aktionen auf Cluster %(cid)s auslösen." - -#, python-format -msgid "%s type name is not a string" -msgstr "%s-Typname ist keine Zeichenfolge" - -#, python-format -msgid "%s type name not specified" -msgstr "%s Typenname nicht angegeben" - -#, python-format -msgid "'%(other)s' must be an instance of '%(cls)s'" -msgstr "'%(other)s' muss eine Instanz von '%(cls)s' sein" - -#, python-format -msgid "'%(value)s' must be one of the allowed values: %(allowed)s" -msgstr "'%(value)s' muss einer der erlaubten Werte sein: %(allowed)s" - -#, python-format -msgid "'%s' is not a List" -msgstr "'%s' ist keine Liste" - -#, python-format -msgid "'%s' is not a Map" -msgstr "'%s' ist keine Map" - -msgid "A boolean specifying whether a stack operation can be rolled back." -msgstr "" -"Ein boolescher Wert, der angibt, ob eine Stapeloperation zurückgesetzt " -"werden kann." - -msgid "" -"A collection of key/value pairs to be associated with the Scheduler hints. " -"Both key and value must be <=255 chars." 
-msgstr "" -"Eine Auflistung von Schlüssel/Wert-Paaren, die den Scheduler-Hinweisen " -"zugeordnet werden sollen. Schlüssel und Wert müssen <= 255 Zeichen sein." - -msgid "" -"A collection of key/value pairs to be associated with the server created. " -"Both key and value must be <=255 chars." -msgstr "" -"Eine Auflistung von Schlüssel/Wert-Paaren, die dem erstellten Server " -"zugeordnet werden sollen. Schlüssel und Wert müssen <= 255 Zeichen sein." - -msgid "A dictionary for specifying the customized context for stack operations" -msgstr "" -"Ein Wörterbuch zum Festlegen des benutzerdefinierten Kontexts für " -"Stapeloperationen" - -msgid "" -"A integer that specifies the number of minutes that a stack operation times " -"out." -msgstr "" -"Eine ganze Zahl, die die Anzahl der Minuten angibt, die eine Stapeloperation " -"abläuft." - -msgid "A list of security groups to be attached to this port." -msgstr "" -"Eine Liste der Sicherheitsgruppen, die an diesen Port angeschlossen werden " -"sollen." - -msgid "" -"A list specifying the properties of block devices to be used for this server." -msgstr "" -"Eine Liste, die die Eigenschaften von Blockgeräten angibt, die für diesen " -"Server verwendet werden sollen." - -msgid "A map specifying the path & contents for an injected file." -msgstr "" -"Eine Map, die den Pfad und den Inhalt für eine injizierte Datei angibt." - -msgid "" -"A map specifying the properties of a block device to be used by the server." -msgstr "" -"Eine Karte, die die Eigenschaften eines vom Server zu verwendenden " -"Blockgeräts angibt." - -msgid "A map specifying the properties of a network for uses." -msgstr "" -"Eine Map, die die Eigenschaften eines Netzwerks für Verwendungen angibt." - -msgid "A map that specifies the environment used for stack operations." -msgstr "" -"Eine Zuordnung, die die für Stapeloperationen verwendete Umgebung angibt." - -msgid "A number specifying the amount of adjustment." -msgstr "Eine Zahl, die den Anpassungsbetrag angibt." - -#, python-format -msgid "A policy named '%(name)s' already exists." -msgstr "Eine Richtlinie mit dem Namen '%(name)s' existiert bereits." - -#, python-format -msgid "A profile named '%(name)s' already exists." -msgstr "Ein Profil namens '%(name)s' existiert bereits." - -#, python-format -msgid "A receiver named '%s' already exists." -msgstr "Ein Empfänger namens '%s' existiert bereits." - -msgid "A string referencing the image to use." -msgstr "Eine Zeichenfolge, die auf das zu verwendende Abbild verweist." - -msgid "A text description of policy." -msgstr "Eine Textbeschreibung der Richtlinie." - -#, python-format -msgid "" -"API Version String '%(version)s' is of invalid format. It must be of format " -"'major.minor'." -msgstr "" -"API-Version String '%(version)s' hat ein ungültiges Format. Es muss das " -"Format 'major.minor' haben." - -#, python-format -msgid "API version '%(version)s' is not supported on this method." -msgstr "" -"Die API-Version '%(version)s' wird für diese Methode nicht unterstützt." - -msgid "Abandon a heat stack node." -msgstr "Verlassen Sie einen Heat Stack-Knoten." - -#, python-format -msgid "Action name cannot be any of %s." -msgstr "Der Aktionsname darf nicht %s sein." - -msgid "Action name is required for creating webhook receiver." -msgstr "Der Aktionsname wird zum Erstellen des Webhook-Empfängers benötigt." - -#, python-format -msgid "Action parameter %s is not recognizable." -msgstr "Aktionsparameter %s ist nicht erkennbar." - -msgid "Action to try for node recovery." 
-msgstr "Aktion für die Wiederherstellung des Knotens." - -msgid "" -"Address to bind the server. Useful when selecting a particular network " -"interface." -msgstr "" -"Adresse zum Binden des Servers. Nützlich bei der Auswahl einer bestimmten " -"Netzwerkschnittstelle." - -msgid "Administrative state of the VIP." -msgstr "Verwaltungsstatus des VIPs." - -msgid "Administrative state of the health monitor." -msgstr "Verwaltungszustand des Gesundheitsmonitors" - -msgid "Administrative state of the pool." -msgstr "Verwaltungszustand des Pools" - -msgid "Age must be a positive integer." -msgstr "Alter muss eine positive Ganzzahl sein." - -#, python-format -msgid "Allowed values: %s" -msgstr "Erlaubte Werte: %s" - -msgid "AllowedValues must be a list or a string" -msgstr "AllowedValues muss eine Liste oder eine Zeichenfolge sein" - -msgid "An availability zone as candidate." -msgstr "Eine Verfügbarkeitszone als Kandidat." - -msgid "An region as a candidate." -msgstr "Eine Region als Kandidat." - -msgid "An unknown exception occurred." -msgstr "Eine unbekannte Ausnahme ist aufgetreten." - -msgid "Batching request validated." -msgstr "Batching-Anfrage validiert" - -msgid "Binary" -msgstr "Binär" - -msgid "Both cluster identity and action must be specified." -msgstr "" -"Sowohl die Clusteridentität als auch die Aktion müssen angegeben werden." - -#, python-format -msgid "Both template and template_url are not specified for profile '%s'." -msgstr "" -"Sowohl die Vorlage als auch template_url sind für das Profil '%s' nicht " -"angegeben." - -msgid "Bus of the device." -msgstr "Bus des Geräts." - -msgid "Candidates generated" -msgstr "Kandidaten generiert" - -#, python-format -msgid "Cannot find the given cluster: %s" -msgstr "Der angegebene Cluster kann nicht gefunden werden:%s" - -msgid "Cannot update a cluster to a different profile type, operation aborted." -msgstr "" -"Ein Cluster kann nicht auf einen anderen Profiltyp aktualisiert werden, der " -"Vorgang wurde abgebrochen." - -msgid "Cannot update a node to a different profile type, operation aborted." -msgstr "" -"Ein Knoten kann nicht auf einen anderen Profiltyp aktualisiert werden, der " -"Vorgang wurde abgebrochen." - -msgid "Change the administrator password." -msgstr "Ändern Sie das Administratorkennwort." - -#, python-format -msgid "Cluster (%(cid)s) cannot be found." -msgstr "Cluster (%(cid)s) kann nicht gefunden werden." - -msgid "Cluster (c1) cannot be found." -msgstr "Cluster (c1) kann nicht gefunden werden." - -msgid "Cluster identity is required for creating webhook receiver." -msgstr "" -"Die Cluster-Identität wird für die Erstellung des Webhook-Empfängers " -"benötigt." - -msgid "Configuration options for zaqar trustee." -msgstr "Konfigurationsoptionen für den zaqar-Treuhänder" - -msgid "Contents of files referenced by the template, if any." -msgstr "Inhalt von Dateien, auf die die Vorlage verweist, falls vorhanden." - -msgid "Contents of the file to be injected." -msgstr "Inhalt der zu injizierenden Datei." - -#, python-format -msgid "Could not bind to %(bind_addr)s after trying 30 seconds" -msgstr "" -"Nach dem Versuch von 30 Sekunden konnte nicht an %(bind_addr)s gebunden " -"werden" - -msgid "Count for scale-in request cannot be 0." -msgstr "Die Anzahl der Scale-In-Anforderungen kann nicht 0 sein." - -msgid "Count for scale-out request cannot be 0." -msgstr "Die Anzahl der Scale-Out-Anforderungen kann nicht 0 sein." 
- -msgid "Created At" -msgstr "Hergestellt in" - -msgid "Criteria used in selecting candidates for deletion" -msgstr "" -"Kriterien, die bei der Auswahl von Kandidaten zum Löschen verwendet werden" - -msgid "Customized security context for operating containers." -msgstr "Angepasster Sicherheitskontext zum Bedienen von Containern" - -msgid "Customized security context for operating servers." -msgstr "Angepasster Sicherheitskontext für den Betrieb von Servern." - -#, python-format -msgid "Dead service %s is removed." -msgstr "Der tote Service %s wurde entfernt." - -msgid "Default cloud backend to use." -msgstr "Zu verwendendes Standard-Cloud-Backend" - -msgid "Default region name used to get services endpoints." -msgstr "" -"Name der Standardregion, der zum Abrufen von Dienstendpunkten verwendet wird." - -msgid "Define the boot order of the device" -msgstr "Definieren Sie die Startreihenfolge des Geräts" - -msgid "Detailed specification for scaling adjustments." -msgstr "Detaillierte Spezifikation für Skalierungseinstellungen." - -#, python-format -msgid "Driver plugin %(name)s is not found." -msgstr "Treiber-Plugin %(name)s wurde nicht gefunden." - -#, python-format -msgid "Either '%(c)s' or '%(n)s' must be specified, but not both." -msgstr "" -"Es muss entweder '%(c)s' oder '%(n)s' angegeben werden, aber nicht beides." - -#, python-format -msgid "Either '%(c)s' or '%(n)s' must be specified." -msgstr "Entweder '%(c)s' oder '%(n)s' muss angegeben werden." - -msgid "Enable vSphere DRS extension." -msgstr "Aktivieren Sie die vSphere DRS-Erweiterung." - -#, python-format -msgid "Endpoint plugin %(name)s is not found." -msgstr "Endpoint-Plugin %(name)s wurde nicht gefunden." - -msgid "Engine died when executing this action." -msgstr "Beim Ausführen dieser Aktion ist die Engine abgestürzt." - -msgid "Enum field only support string values." -msgstr "Enum-Feld unterstützt nur String-Werte." - -#, python-format -msgid "Error parsing input: %s" -msgstr "Fehler beim Parsen der Eingabe:%s" - -msgid "Evacuate the server to a different host." -msgstr "Evakuiere den Server auf einen anderen Host." - -msgid "Event dispatchers to enable." -msgstr "Event-Dispatcher aktivieren." - -msgid "" -"Event that will trigger this policy. Must be one of CLUSTER_SCALE_IN and " -"CLUSTER_SCALE_OUT." -msgstr "" -"Ereignis, das diese Richtlinie auslöst Muss einer von CLUSTER_SCALE_IN und " -"CLUSTER_SCALE_OUT sein." - -msgid "Exchange name for heat notifications." -msgstr "Name für Wärmebenachrichtigungen austauschen" - -msgid "Exchange name for nova notifications." -msgstr "Exchange-Name für Nova-Benachrichtigungen" - -msgid "Exclude derived actions from events dumping." -msgstr "Eingeschlossene Aktionen von Ereignissen ausschließen" - -msgid "Expected HTTP codes for a passing HTTP(S) monitor." -msgstr "Erwartete HTTP-Codes für einen übergehenden HTTP (S) -Monitor." - -#, python-format -msgid "Failed in %(op)s %(type)s '%(id)s': %(message)s." -msgstr "Fehlgeschlagen in %(op)s %(type)s '%(id)s': %(message)s." - -msgid "Failed in adding node into lb pool." -msgstr "Fehler beim Hinzufügen eines Knotens zum lb-Pool." - -#, python-format -msgid "Failed in adding nodes into lb pool: %s" -msgstr "Fehler beim Hinzufügen von Knoten zu lb pool: %s" - -#, python-format -msgid "Failed in creating %(type)s: %(message)s." -msgstr "Fehler beim Erstellen von %(type)s: %(message)s." - -#, python-format -msgid "Failed in creating health monitor (%s)." -msgstr "Fehler beim Erstellen des Integritätsmonitors (%s)." 
- -msgid "Failed in creating lb health monitor: CREATE FAILED." -msgstr "Fehler beim Erstellen des lb-Gesundheitsmonitors: CREATE FAILED." - -msgid "Failed in creating lb listener: CREATE FAILED." -msgstr "Fehler beim Erstellen von lb-Listener: CREATE FAILED." - -msgid "Failed in creating lb pool: CREATE FAILED." -msgstr "Fehler beim Erstellen des lb-Pools: CREATE FAILED." - -#, python-format -msgid "Failed in creating listener (%s)." -msgstr "Fehler beim Erstellen des Listeners (%s)." - -#, python-format -msgid "Failed in creating loadbalancer (%s)." -msgstr "Fehler beim Erstellen des Loadbalancers (%s)." - -msgid "Failed in creating loadbalancer: CREATE FAILED." -msgstr "Fehler beim Erstellen von loadbalancer: CREATE FAILED." - -#, python-format -msgid "Failed in creating pool (%s)." -msgstr "Fehler beim Erstellen des Pools (%s)." - -#, python-format -msgid "Failed in creating profile %(name)s: %(error)s" -msgstr "Fehler beim Erstellen des Profils %(name)s: %(error)s" - -msgid "Failed in creating servergroup." -msgstr "Fehler beim Erstellen der Servergruppe." - -#, python-format -msgid "Failed in deleting %(type)s '%(id)s': %(message)s." -msgstr "Fehler beim Löschen von %(type)s '%(id)s': %(message)s." - -msgid "Failed in deleting healthmonitor: DELETE FAILED." -msgstr "Fehler beim Löschen von 'healthmonitor': DELETE FAILED." - -msgid "Failed in deleting lb pool: DELETE FAILED." -msgstr "Fehler beim Löschen des lb-Pools: DELETE FAILED." - -msgid "Failed in deleting listener: DELETE FAILED." -msgstr "Fehler beim Löschen des Listeners: DELETE FAILED." - -msgid "Failed in deleting servergroup." -msgstr "Fehler beim Löschen der Servergruppe." - -#, python-format -msgid "Failed in found %(type)s '%(id)s': %(message)s." -msgstr "Fehlgeschlagen gefunden in %(type)s '%(id)s': %(message)s." - -msgid "Failed in getting subnet: GET FAILED." -msgstr "Fehler beim Abrufen des Subnetzes: GET FAILED." - -#, python-format -msgid "Failed in removing deleted node(s) from lb pool: %s" -msgstr "Fehler beim Entfernen gelöschter Knoten aus dem lb-Pool: %s" - -msgid "Failed in removing node from lb pool." -msgstr "Fehler beim Entfernen des Knotens aus dem lb Pool." - -#, python-format -msgid "Failed in retrieving servergroup '%s'." -msgstr "Fehler beim Abrufen der Servergruppe '%s'." - -#, python-format -msgid "Failed in updating %(type)s '%(id)s': %(message)s." -msgstr "Fehler beim Aktualisieren von %(type)s '%(id)s' :%(message)s." - -#, python-format -msgid "Failed in validating template: %s" -msgstr "Fehler beim Überprüfen der Vorlage: %s" - -msgid "Failed to remove servers from existed LB." -msgstr "Fehler beim Entfernen von Servern aus vorhandener LB." - -#, python-format -msgid "Failed to retrieve data: %s" -msgstr "Fehler beim Abrufen der Daten: %s" - -#, python-format -msgid "Filter key '%s' is unsupported" -msgstr "Filtertaste '%s' wird nicht unterstützt" - -msgid "Fixed IP to be used by the network." -msgstr "Feste IP, die vom Netzwerk verwendet werden soll." - -msgid "" -"Flag to indicate whether to enforce unique names for Senlin objects " -"belonging to the same project." -msgstr "" -"Flag, um anzugeben, ob eindeutige Namen für Senlin-Objekte erzwungen werden " -"sollen, die zu demselben Projekt gehören." - -msgid "Health monitor for loadbalancer." -msgstr "Gesundheitsmonitor für Loadbalancer." - -msgid "Heat stack template url." -msgstr "Heat-Stack Vorlagen-URL." - -msgid "Heat stack template." -msgstr "Heat-Stack-Vorlage." 
- -msgid "Host" -msgstr "Gastgeber" - -msgid "ID of flavor used for the server." -msgstr "ID der Variante, die für den Server verwendet wird." - -msgid "ID of image to be used for the new server." -msgstr "ID des Abbildes, das für den neuen Server verwendet werden soll." - -msgid "ID of pool for the cluster on which nodes can be connected." -msgstr "" -"ID des Pools für den Cluster, auf dem die Knoten verbunden werden können." - -msgid "ID of the health manager for the loadbalancer." -msgstr "ID des Gesundheitsmanagers für den Loadbalancer." - -msgid "ID of the source image, snapshot or volume" -msgstr "ID des Quellabbildes, Schattenkopie oder Datenträger" - -msgid "IP address of the VIP." -msgstr "IP-Adresse des VIPs" - -msgid "If false, closes the client socket explicitly." -msgstr "Wenn false, wird der Client-Socket explizit geschlossen." - -#, python-format -msgid "Illegal cluster action '%s' specified." -msgstr "Illegale Clusteraktion '%s' angegeben" - -msgid "Illegal cluster action 'foo' specified." -msgstr "Illegale Clusteraktion 'foo' angegeben." - -msgid "In-instance path for the file to be injected." -msgstr "In-Instance-Pfad für die zu injizierende Datei." - -msgid "Internal error happened" -msgstr "Interner Fehler ist aufgetreten" - -msgid "Interval in seconds between update batches if any." -msgstr "Intervall in Sekunden zwischen den Update-Batches, falls vorhanden." - -#, python-format -msgid "Invalid URL scheme %s" -msgstr "Ungültiges URL-Schema %s" - -#, python-format -msgid "Invalid attribute path - %s" -msgstr "Ungültiger Attributpfad - %s" - -#, python-format -msgid "Invalid content type %(content_type)s" -msgstr "Ungültiger Inhaltstyp %(content_type)s" - -#, python-format -msgid "Invalid count (%(c)s) for action '%(a)s'." -msgstr "Ungültige Anzahl (%(c)s) für die Aktion '%(a)s'." - -#, python-format -msgid "Invalid default %(default)s: %(exc)s" -msgstr "Ungültiger Standard %(default)s: %(exc)s" - -#, python-format -msgid "Invalid parameter %s" -msgstr "Ungültige Parameter %s" - -#, python-format -msgid "Invalid parameter '%s'" -msgstr "Ungültige Parameter '%s'" - -#, python-format -msgid "Invalid value '%(value)s' specified for '%(name)s'" -msgstr "Ungültiger Wert '%(value)s' für '%(name)s' angegeben" - -#, python-format -msgid "Items for '%(attr)s' must be unique" -msgstr "Elemente für '%(attr)s' müssen eindeutig sein" - -#, python-format -msgid "" -"JSON body size (%(len)s bytes) exceeds maximum allowed size (%(limit)s " -"bytes)." -msgstr "" -"Die JSON-Körpergröße (%(len)s Bytes) überschreitet die maximal zulässige " -"Größe (%(limit)s Byte)." - -msgid "LB deletion succeeded" -msgstr "LB-Löschung erfolgreich" - -msgid "LB pool properties." -msgstr "LB-Pooleigenschaften" - -msgid "LB resources deletion succeeded." -msgstr "Löschen von LB-Ressourcen erfolgreich" - -msgid "Lifecycle hook properties" -msgstr "Lebenszyklus-Hook-Eigenschaften" - -msgid "List of actions to try for node recovery." -msgstr "" -"Liste der Aktionen, die für die Knotenwiederherstellung ausgeführt werden " -"sollen." - -msgid "List of availability zones to choose from." -msgstr "Liste der verfügbaren Zonen zur Auswahl." - -msgid "List of files to be injected into the server, where each." -msgstr "Liste der Dateien, die in den Server injiziert werden sollen." - -msgid "List of networks for the server." -msgstr "Liste der Netzwerke für den Server." - -msgid "List of regions to choose from." -msgstr "Liste der Regionen zur Auswahl." - -msgid "List of security groups." 
-msgstr "Liste der Sicherheitsgruppen" - -msgid "List of services to be fenced." -msgstr "Liste der Dienste, die eingezäunt werden sollen." - -msgid "Load balancing algorithm." -msgstr "Lastenausgleichsalgorithmus" - -msgid "Location of the SSL certificate file to use for SSL mode." -msgstr "Speicherort der SSL-Zertifikatsdatei für den SSL-Modus" - -msgid "Location of the SSL key file to use for enabling SSL mode." -msgstr "" -"Speicherort der SSL-Schlüsseldatei, die zum Aktivieren des SSL-Modus " -"verwendet werden soll." - -msgid "Lock the server." -msgstr "Sperren Sie den Server." - -msgid "Lowest event priorities to be dispatched." -msgstr "Niedrigste Ereignisprioritäten, die gesendet werden sollen." - -msgid "Malformed request data, missing 'action' key in request body." -msgstr "" -"Fehlgeschlagene Anfragedaten, fehlender 'action' Schlüssel im Anfragetext." - -msgid "Malformed request data, missing 'cluster' key in request body." -msgstr "Fehlerhafte Anfragedaten, fehlender Clusterschlüssel im Anfragetext." - -msgid "Malformed request data, missing 'node' key in request body." -msgstr "" -"Fehlgeschlagene Anfragedaten, fehlender Knotenschlüssel im Anfragetext." - -msgid "Malformed request data, missing 'policy' key in request body." -msgstr "" -"Fehlerhafte Anforderungsdaten, fehlender 'Richtlinienschlüssel' im " -"Anfragetext." - -msgid "Malformed request data, missing 'profile' key in request body." -msgstr "" -"Fehlgeschlagene Anfragedaten, fehlender 'Profil'-Schlüssel im Anfragetext." - -msgid "Malformed request data, missing 'receiver' key in request body." -msgstr "" -"Fehlgeschlagene Anfragedaten, fehlender Empfängerschlüssel im Anfragetext." - -msgid "Map contains duplicated values" -msgstr "Map enthält doppelte Werte" - -msgid "" -"Maximum line size of message headers to be accepted. max_header_line may " -"need to be increased when using large tokens (typically those generated by " -"the Keystone v3 API with big service catalogs)." -msgstr "" -"Maximale Zeilengröße von Nachrichtenheadern, die akzeptiert werden sollen. " -"max_header_line muss möglicherweise erhöht werden, wenn große Token " -"verwendet werden (normalerweise solche, die von der Keystone v3-API mit " -"großen Servicekatalogen generiert werden)." - -msgid "Maximum nodes allowed per top-level cluster." -msgstr "Maximal zulässige Anzahl von Knoten pro Cluster auf oberster Ebene." - -msgid "Maximum number of clusters any one project may have active at one time." -msgstr "" -"Maximale Anzahl an Clustern, die ein Projekt gleichzeitig aktiv sein kann." - -msgid "Maximum number of connections per second allowed for this VIP" -msgstr "" -"Maximale Anzahl an Verbindungen pro Sekunde, die für diesen VIP zulässig sind" - -msgid "" -"Maximum number of node actions that each engine worker can schedule " -"consecutively per batch. 0 means no limit." -msgstr "" -"Maximale Anzahl von Knotenaktionen, die jeder Engine Worker nacheinander pro " -"Batch einplanen kann. 0 bedeutet keine Begrenzung." - -msgid "" -"Maximum number of nodes in this region. The default is -1 which means no cap " -"set." -msgstr "" -"Maximale Anzahl von Knoten in dieser Region Der Standardwert ist -1, was " -"bedeutet, dass kein Cap gesetzt ist." - -msgid "Maximum number of nodes that will be updated in parallel." -msgstr "Maximale Anzahl der Knoten, die parallel aktualisiert werden." - -msgid "Maximum raw byte size of JSON request body." 
-msgstr "Maximale Raw-Byte-Größe des JSON-Anforderungshauptteils" - -msgid "Maximum raw byte size of data from web response." -msgstr "Maximale Byte-Rohgröße der Daten aus der Web-Antwort." - -msgid "Maximum seconds between cluster check to be called." -msgstr "Maximale Zeit zwischen dem zu prüfenden Cluster-Check." - -msgid "Maximum time since last check-in for a service to be considered up." -msgstr "" -"Maximale Zeit seit dem letzten Check-In für einen Service, der " -"berücksichtigt werden muss." - -msgid "Message body is empty." -msgstr "Der Nachrichtentext ist leer." - -msgid "Minimum number of nodes in service when performing updates." -msgstr "" -"Minimale Anzahl von Knoten, die beim Ausführen von Aktualisierungen in " -"Betrieb sind." - -msgid "Missing adjustment_type value for size adjustment." -msgstr "Fehlender Wert von adjustment_type für die Größenanpassung." - -msgid "Missing number value for size adjustment." -msgstr "Fehlender Zahlenwert für die Größenanpassung." - -#, python-format -msgid "Missing sort key for '%s'." -msgstr "Fehlender Sortierschlüssel für '%s'." - -msgid "Multiple actions specified" -msgstr "Mehrere Aktionen angegeben" - -msgid "Multiple actions specified." -msgstr "Mehrere Aktionen angegeben" - -msgid "Multiple operations specified" -msgstr "Mehrere Vorgänge angegeben" - -msgid "Multiple operations specified." -msgstr "Mehrere Vorgänge angegeben" - -#, python-format -msgid "" -"Multiple results found matching the query criteria '%(arg)s'. Please be more " -"specific." -msgstr "" -"Es wurden mehrere Ergebnisse gefunden, die den Suchkriterien '%(arg)s' " -"entsprechen. Seien Sie bitte spezifischer." - -msgid "Must specify a network to create floating IP" -msgstr "Muss ein Netzwerk angeben, um Floating IP zu erstellen" - -msgid "Name of Nova keypair to be injected to server." -msgstr "Name des Nova-Schlüsselpaars, das in den Server injiziert werden soll." - -msgid "Name of a region." -msgstr "Name einer Region" - -msgid "Name of a security group" -msgstr "Name einer Sicherheitsgruppe" - -msgid "Name of action to execute." -msgstr "Name der auszuführenden Aktion" - -msgid "Name of an availability zone." -msgstr "Name einer Verfügbarkeitszone" - -msgid "Name of availability zone for running the server." -msgstr "Name der Verfügbarkeitszone zum Ausführen des Servers." - -msgid "Name of cookie if type set to APP_COOKIE." -msgstr "Name des Cookies, wenn der Typ auf APP_COOKIE gesetzt ist." - -msgid "Name of the availability zone to place the nodes." -msgstr "Name der Verfügbarkeitszone zum Platzieren der Knoten." - -msgid "Name of the device(e.g. vda, xda, ....)." -msgstr "Name des Gerätes (z.B. vda, xda, ....)." - -msgid "Name of the domain for the service project." -msgstr "Name der Domäne für das Serviceprojekt" - -msgid "Name of the domain for the service user." -msgstr "Name der Domäne für den Servicebenutzer" - -msgid "" -"Name of the engine node. This can be an opaque identifier. It is not " -"necessarily a hostname, FQDN or IP address." -msgstr "" -"Name des Motorknotens Dies kann eine undurchsichtige Kennung sein. Es ist " -"nicht unbedingt ein Hostname, FQDN oder IP-Adresse." - -msgid "Name of the policy type." -msgstr "Name des Richtlinientyps" - -msgid "Name of the profile type." -msgstr "Name des Profiltyps" - -msgid "Name of the server. When omitted, the node name will be used." -msgstr "Name des Servers. Ohne Angabe wird der Knotenname verwendet." - -msgid "Name of the service project." 
-msgstr "Name des Serviceprojekts" - -msgid "" -"Name or ID of loadbalancer for the cluster on which nodes can be connected." -msgstr "" -"Name oder ID des Loadbalancers für den Cluster, an den Knoten angeschlossen " -"werden können." - -msgid "Name or ID of network to create a port on." -msgstr "Name oder ID des Netzwerks, auf dem ein Port erstellt werden soll." - -msgid "Name or ID of subnet for the port on which nodes can be connected." -msgstr "" -"Name oder ID des Subnetzes für den Port, an den die Knoten angeschlossen " -"werden können." - -msgid "New password for the administrator." -msgstr "Neues Passwort für den Administrator" - -msgid "No action name specified" -msgstr "Kein Aktionsname angegeben" - -msgid "No action specified" -msgstr "Keine Aktion angegeben" - -msgid "No action specified." -msgstr "Keine Aktion angegeben" - -msgid "No availability zone found available." -msgstr "Keine Verfügbarkeitszone gefunden verfügbar." - -msgid "No list of valid values provided for enum." -msgstr "Keine Liste gültiger Werte für die Enumeration." - -msgid "No node (matching the filter) could be found" -msgstr "Kein Knoten (passend zum Filter) konnte gefunden werden" - -msgid "No operation specified" -msgstr "Keine Operation angegeben" - -msgid "No operation specified." -msgstr "Keine Operation angegeben" - -msgid "No property needs an update." -msgstr "Keine Eigenschaft benötigt ein Update." - -msgid "No region is found usable." -msgstr "Keine Region wird als verwendbar befunden." - -msgid "No suitable vSphere host is available." -msgstr "Es ist kein geeigneter vSphere-Host verfügbar." - -msgid "No target specified" -msgstr "Kein Ziel angegeben" - -msgid "Node and cluster have different profile type, operation aborted." -msgstr "" -"Knoten und Cluster haben einen anderen Profiltyp, Operation abgebrochen." - -#, python-format -msgid "Nodes %s already member of a cluster." -msgstr "Knoten %s ist bereits Mitglied eines Clusters." - -msgid "Nodes ['NEW1'] already member of a cluster." -msgstr "Knoten ['NEW1'] ist bereits Mitglied eines Clusters." - -msgid "Nodes ['NODE2'] already owned by some cluster." -msgstr "Knoten ['NODE2'] gehören bereits zu einigen Clustern." - -#, python-format -msgid "Nodes are not ACTIVE: %s." -msgstr "Knoten sind nicht AKTIV: %s." - -msgid "Nodes are not ACTIVE: ['NEW1']." -msgstr "Knoten sind nicht AKTIV: ['NEW1']." - -msgid "Nodes are not ACTIVE: ['NODE2']." -msgstr "Knoten sind nicht AKTIV: ['NODE2']." - -msgid "Nodes not found:" -msgstr "Knoten nicht gefunden:" - -#, python-format -msgid "Nodes not found: %s." -msgstr "Knoten nicht gefunden: %s." - -#, python-format -msgid "Nodes not members of specified cluster: %s." -msgstr "Knoten, die nicht Mitglieder des angegebenen Clusters sind: %s." - -msgid "Nodes not members of specified cluster: ['NODE1']." -msgstr "Knoten, die nicht Mitglieder des angegebenen Clusters sind: ['NODE1']." - -msgid "Not enough parameters to do resize action." -msgstr "Nicht genügend Parameter, um die Aktion zu ändern." - -msgid "Notification endpoints to enable." -msgstr "Benachrichtigungsendpunkte zum Aktivieren" - -msgid "Notifying non-message receiver is not allowed." -msgstr "Das Benachrichtigen von Nicht-Nachrichtenempfängern ist nicht erlaubt." - -msgid "Number of backlog requests to configure the socket with." -msgstr "" -"Anzahl der Backlog-Anfragen, mit denen der Socket konfiguriert werden soll." - -msgid "Number of seconds before actual deletion happens." 
-msgstr "Anzahl der Sekunden vor dem tatsächlichen Löschen" - -msgid "Number of seconds before real deletion happens." -msgstr "Anzahl der Sekunden vor dem tatsächlichen Löschen" - -msgid "Number of seconds between lock retries." -msgstr "Anzahl der Sekunden zwischen Sperrversuche." - -msgid "" -"Number of seconds to hold the cluster for cool-down before allowing cluster " -"to be resized again." -msgstr "" -"Anzahl der Sekunden, in denen der Cluster zum Abkühlen gehalten wird, bevor " -"die Größe des Clusters erneut geändert werden kann." - -msgid "Number of seconds to wait before killing the container." -msgstr "" -"Anzahl der Sekunden, die gewartet werden muss, bevor der Container beendet " -"wird" - -msgid "Number of senlin-conductor threads." -msgstr "Anzahl der Arbeiter für den Senlin-Conductor-Dienst." - -msgid "Number of times trying to grab a lock." -msgstr "Anzahl der Versuche, eine Sperre zu erfassen." - -msgid "Number of workers for Senlin service." -msgstr "Anzahl der Arbeiter für den Senlin-Dienst." - -#, python-format -msgid "One of '%(p)s' and '%(n)s' must be provided" -msgstr "Eines von '%(p)s' und '%(n)s' muss angegeben werden" - -#, python-format -msgid "Only one '%s' is supported for now." -msgstr "Nur ein '%s' wird jetzt unterstützt." - -#, python-format -msgid "Original nodes not found: %s." -msgstr "Ursprüngliche Knoten nicht gefunden: %s." - -msgid "Output 'fixed_ip' is missing from the provided stack node" -msgstr "Die Ausgabe 'fixed_ip' fehlt im angegebenen Stack-Knoten" - -msgid "Parameters for the action" -msgstr "Parameter für die Aktion" - -msgid "Parameters to be passed to Heat for stack operations." -msgstr "Parameter, die an Heat für Stapeloperationen übergeben werden." - -msgid "Password for the administrator account." -msgstr "Passwort für das Administratorkonto" - -msgid "Password specified for the Senlin service user." -msgstr "Das Passwort wurde für den Benutzer des Senlin-Dienstes festgelegt." - -msgid "Pause a container." -msgstr "Pausiere einen Container." - -msgid "Pause the server from running." -msgstr "Unterbrechen Sie die Ausführung des Servers." - -#, python-format -msgid "" -"Policies specified (%(specified)s) doesn't match that of the existing " -"servergroup (%(existing)s)." -msgstr "" -"Die angegebenen Richtlinien (%(specified)s) stimmen nicht mit denen der " -"vorhandenen Servergruppe (%(existing)s) überein." - -msgid "Policy aspect for node failure detection." -msgstr "Richtlinienaspekt für die Erkennung von Knotenfehlern." - -msgid "Policy aspect for node failure recovery." -msgstr "Richtlinienaspekt für die Wiederherstellung von Knotenfehlern." - -#, python-format -msgid "Policy not applicable on profile type: %s" -msgstr "Richtlinie gilt nicht für Profiltyp:%s" - -msgid "Policy not specified." -msgstr "Richtlinie nicht angegeben" - -msgid "Port ID to be used by the network." -msgstr "Port ID, die vom Netzwerk verwendet werden soll." - -msgid "Port on which servers are running on the nodes." -msgstr "Port, auf dem Server auf den Knoten ausgeführt werden." - -msgid "Profile not specified." -msgstr "Profil nicht angegeben." - -#, python-format -msgid "Profile type of nodes %s do not match that of the cluster." -msgstr "Der Profiltyp der Knoten %s stimmt nicht mit dem des Clusters überein." - -#, python-format -msgid "Profile type of nodes %s does not match that of the cluster." -msgstr "Der Profiltyp der Knoten %s stimmt nicht mit dem des Clusters überein." - -msgid "Properties for the policy." 
-msgstr "Eigenschaften für die Richtlinie" - -msgid "Properties for the profile." -msgstr "Eigenschaften für das Profil" - -msgid "Properties of the VM server group" -msgstr "Eigenschaften der VM-Servergruppe" - -msgid "Protocol used for VIP." -msgstr "Protokoll für VIP verwendet." - -msgid "Protocol used for load balancing." -msgstr "Protokoll zum Lastenausgleich." - -msgid "" -"Purge event records which were created in the specified time period. The " -"time is specified by age and granularity, whose value must be one of 'days', " -"'hours', 'minutes' or 'seconds' (default)." -msgstr "" -"Ereignisdatensätze löschen, die im angegebenen Zeitraum erstellt wurden. Die " -"Zeit wird nach Alter und Granularität angegeben, deren Wert entweder 'Tage', " -"'Stunden', 'Minuten' oder 'Sekunden' (Standard) sein muss." - -msgid "" -"Purge event records which were created in the specified time period. The " -"time is specified by age and granularity. For example, granularity=hours and " -"age=2 means purging events created two hours ago. Defaults to 30." -msgstr "" -"Ereignisdatensätze löschen, die im angegebenen Zeitraum erstellt wurden. Die " -"Zeit wird durch Alter und Granularität angegeben. Granularität = Stunden und " -"Alter = 2 bedeutet beispielsweise, dass Ereignisse gelöscht werden, die vor " -"zwei Stunden erstellt wurden. Der Standardwert ist 30." - -msgid "" -"Purge event records with specified project. This can be specified multiple " -"times, or once with parameters separated by semicolon." -msgstr "" -"Ereignisdatensätze mit dem angegebenen Projekt löschen Dies kann mehrmals " -"oder einmal mit durch Semikolon getrennten Parametern angegeben werden." - -msgid "Quota exceeded for resources." -msgstr "Kontingent für Ressourcen überschritten." - -msgid "" -"RPC timeout for the engine liveness check that is used for cluster locking." -msgstr "" -"RPC-Zeitlimit für die Überprüfung der Systemlebensdauer, die für die " -"Clustersperrung verwendet wird." - -msgid "Reboot the nova server." -msgstr "Starte den nova Server neu." - -msgid "Rebuild the server using current image and admin password." -msgstr "" -"Erstellen Sie den Server mit dem aktuellen Abbild und dem " -"Administratorkennwort neu." - -msgid "Recovery action REBOOT is only applicable to os.nova.server clusters." -msgstr "Recovery-Aktion REBOOT ist nur auf os.nova.server-Cluster anwendbar." - -msgid "Recovery action REBUILD is only applicable to os.nova.server clusters." -msgstr "" -"Die Wiederherstellungsaktion REBUILD ist nur auf os.nova.server-Cluster " -"anwendbar." - -#, python-format -msgid "Replacement nodes not found: %s." -msgstr "Ersatzknoten nicht gefunden: %s." - -#, python-format -msgid "Request body missing '%s' key." -msgstr "Anfragekörper fehlt Schlüssel '%s'." - -#, python-format -msgid "Request limit exceeded: %(message)s" -msgstr "Anforderungslimit überschritten: %(message)s" - -#, python-format -msgid "Required parameter '%s' not provided" -msgstr "Erforderlicher Parameter '%s' nicht angegeben" - -msgid "Required path attribute is missing." -msgstr "Erforderliches Pfadattribut fehlt." - -#, python-format -msgid "Required spec item '%s' not provided" -msgstr "Das erforderliche Spezifikationselement '%s' wurde nicht angegeben" - -msgid "Rescue the server." -msgstr "Retten Sie den Server." - -msgid "Restart a container." -msgstr "Starten Sie einen Container neu." - -msgid "Resume the running of the server." -msgstr "Fortsetzen der Ausführung des Servers." - -msgid "Scaling request validated." 
-msgstr "Skalierungsanforderung validiert." - -#, python-format -msgid "Schema valid only for List or Map, not %s" -msgstr "Schema ist nur für Liste oder Map gültig, nicht %s" - -msgid "Seconds between running periodic tasks." -msgstr "Sekunden zwischen dem Ausführen periodischer Aufgaben." - -msgid "" -"Seconds to pause between scheduling two consecutive batches of node actions." -msgstr "" -"Sekunden, um zwischen der Planung zweier aufeinander folgender Stapel von " -"Knotenaktionen zu pausieren." - -msgid "Senlin API revision." -msgstr "Senlin API Revision." - -msgid "Senlin engine revision." -msgstr "Senlin-Engineüberarbeitung." - -msgid "Senlin service user name." -msgstr "Senlin-Dienstbenutzername" - -msgid "Servergroup resource deletion succeeded." -msgstr "Löschen der Servergruppenressource erfolgreich" - -msgid "Service ID" -msgstr "Dienst-ID" - -msgid "Service to be fenced." -msgstr "Service der eingezäunt werden soll." - -msgid "Session persistence configuration." -msgstr "Konfiguration der Sitzungspersistenz" - -msgid "Show available commands." -msgstr "Zeige verfügbare Befehle." - -msgid "Size of the block device in MB(for swap) and in GB(for other formats)" -msgstr "Größe des Blockgeräts in MB (für Swap) und in GB (für andere Formate)" - -#, python-format -msgid "Some keys in 'context' are invalid: %s" -msgstr "Einige Schlüssel im 'Kontext' sind ungültig: %s" - -msgid "Specifies the disk file system format(e.g. swap, ephemeral, ...)." -msgstr "Gibt das Dateisystem des Dateisystems an (z. B. swap, ephemeral, ...)." - -msgid "Start the server." -msgstr "Starten Sie den Server." - -msgid "Status" -msgstr "Status" - -msgid "Stop the server." -msgstr "Stoppen Sie den Server." - -msgid "Suspend the running of the server." -msgstr "Unterbrechen Sie die Ausführung des Servers." - -msgid "System SIGHUP signal received." -msgstr "System SIGHUP-Signal empfangen." - -msgid "TCP port to listen on." -msgstr "TCP-Port zum Anhören." - -#, python-format -msgid "Testing message %(text)s" -msgstr "Testnachricht %(text)s" - -#, python-format -msgid "The %(type)s '%(id)s' cannot be deleted: %(reason)s." -msgstr "Die %(type)s '%(id)s' kann nicht gelöscht werden: %(reason)s." - -#, python-format -msgid "The %(type)s '%(id)s' could not be found." -msgstr "Die %(type)s '%(id)s' konnte nicht gefunden werden." - -#, python-format -msgid "The %(type)s '%(id)s' is busy now." -msgstr "Die %(type)s '%(id)s' ist jetzt beschäftigt." - -#, python-format -msgid "The %(type)s '%(id)s' is in status %(status)s." -msgstr "Die %(type)s '%(id)s' hat den Status %(status)s." - -#, python-format -msgid "" -"The '%(p)s' property and the '%(fip)s' property cannot be specified at the " -"same time" -msgstr "" -"Die Eigenschaft '%(p)s' und die Eigenschaft '%(fip)s' können nicht " -"gleichzeitig angegeben werden" - -msgid "The 'type' key is missing from the provided spec map." -msgstr "Der Typschlüssel fehlt in der angegebenen Spezifikationsübersicht." - -msgid "The 'version' key is missing from the provided spec map." -msgstr "" -"Der Schlüssel 'Version' fehlt in der bereitgestellten " -"Spezifikationsübersicht." - -msgid "The API paste config file to use." -msgstr "Die zu verwendende API-Einfügekonfigurationsdatei." - -msgid "The HTTP method that the monitor uses for requests." -msgstr "Die HTTP-Methode, die der Monitor für Anforderungen verwendet." - -msgid "" -"The HTTP path of the request sent by the monitor to test the health of a " -"member." 
-msgstr "" -"Der HTTP-Pfad der Anfrage, die vom Monitor gesendet wurde, um den Status " -"eines Mitglieds zu testen." - -msgid "" -"The address for notifying and triggering receivers. It is useful for case " -"Senlin API service is running behind a proxy." -msgstr "" -"Die Adresse zum Benachrichtigen und Auslösen von Empfängern. Dies ist " -"nützlich, wenn der Senlin-API-Dienst hinter einem Proxy ausgeführt wird." - -msgid "The amount of time in milliseconds between sending probes to members." -msgstr "" -"Die Zeit in Millisekunden zwischen dem Senden von Probes an Mitglieder." - -msgid "" -"The cluster 'FAKE_CLUSTER' cannot be deleted: still referenced by " -"profile(s): ['profile1']." -msgstr "" -"Der Cluster 'FAKE_CLUSTER' kann nicht gelöscht werden: wird immer noch von " -"Profil (en) referenziert: ['profile1']." - -#, python-format -msgid "The cluster (%s) contains no active nodes" -msgstr "Der Cluster (%s) enthält keine aktiven Knoten" - -msgid "The cluster (host_cluster) contains no active nodes" -msgstr "Der Cluster (host_cluster) enthält keine aktiven Knoten" - -msgid "The cluster on which container will be launched." -msgstr "Der Cluster, auf dem der Container gestartet werden soll." - -msgid "The command to run when container is started." -msgstr "Der Befehl, der beim Starten des Containers ausgeführt werden soll." - -msgid "The data provided is not a map" -msgstr "Die bereitgestellten Daten sind keine Map" - -msgid "The directory to search for environment files." -msgstr "Das Verzeichnis für die Suche nach Umgebungsdateien." - -msgid "The floating IP address to be associated with this port." -msgstr "Die Floating-IP-Adresse, die diesem Port zugeordnet werden soll." - -msgid "The host cluster 'host_cluster' could not be found." -msgstr "Der Host-Cluster 'host_cluster' konnte nicht gefunden werden." - -msgid "The host node 'fake_node' could not be found." -msgstr "Der Hostknoten 'fake_node' konnte nicht gefunden werden." - -msgid "The image used to create a container" -msgstr "Das Abbild, das zum Erstellen eines Containers verwendet wird" - -msgid "The input is not a JSON object or YAML mapping." -msgstr "Die Eingabe ist kein JSON-Objekt oder YAML-Mapping." - -msgid "The max size(bytes) of message can be posted to notification queue." -msgstr "" -"Die maximale Größe (Bytes) der Nachricht kann an die " -"Benachrichtigungswarteschlange gesendet werden." - -msgid "The max size(bytes) of message can be posted to receiver queue." -msgstr "" -"Die maximale Größe (Bytes) der Nachricht kann an die Empfängerwarteschlange " -"gesendet werden." - -msgid "" -"The maximum time in milliseconds that a monitor waits to connect before it " -"times out." -msgstr "" -"Die maximale Zeit in Millisekunden, die ein Monitor auf die Verbindung " -"wartet, bevor das Zeitlimit überschritten wird." - -msgid "The name of the container." -msgstr "Der Name des Containers." - -msgid "The name of the server group" -msgstr "Der Name der Servergruppe" - -#, python-format -msgid "The node named (%(name)s) already exists." -msgstr "Der Knoten mit dem Namen (%(name)s) existiert bereits." - -#, python-format -msgid "The node named (%s) already exists." -msgstr "Der Knoten mit dem Namen (%s) existiert bereits." - -msgid "The node named (NODE1) already exists." -msgstr "Der Knoten mit dem Namen (NODE1) existiert bereits." - -msgid "The node on which container will be launched." -msgstr "Der Knoten, auf dem der Container gestartet werden soll." 
- -msgid "" -"The number of allowed connection failures before changing the status of the " -"member to INACTIVE." -msgstr "" -"Die Anzahl der zulässigen Verbindungsfehler, bevor der Status des Mitglieds " -"in INACTIVE geändert wird." - -msgid "The params provided is not a map." -msgstr "Die bereitgestellten Params sind keine Karte." - -#, python-format -msgid "The policy '%(p)s' is not attached to the specified cluster '%(c)s'." -msgstr "" -"Die Richtlinie '%(p)s' ist nicht an den angegebenen Cluster %(c)s' angehängt." - -#, python-format -msgid "" -"The policy '%(policy)s' is not attached to the specified cluster " -"'%(cluster)s'." -msgstr "" -"Die Richtlinie '%(policy)s' ist nicht an den angegebenen Cluster " -"'%(cluster)s' angehängt." - -#, python-format -msgid "" -"The policy '%(policy)s' is not found attached to the specified cluster " -"'%(identity)s'." -msgstr "" -"Die Richtlinie '%(policy)s' wurde nicht an den angegebenen Cluster " -"'%(identity)s' angehängt gefunden." - -msgid "" -"The policy 'POLICY_ID' cannot be deleted: still attached to some clusters." -msgstr "" -"Die Richtlinie 'POLICY_ID' kann nicht gelöscht werden: immer noch an einige " -"Cluster angehängt." - -#, python-format -msgid "The policy with type '%(policy_type)s' already exists." -msgstr "Die Richtlinie mit dem Typ '%(policy_type)s' existiert bereits." - -msgid "" -"The port for notifying and triggering receivers. It is useful for case " -"Senlin API service is running behind a proxy." -msgstr "" -"Der Port zum Benachrichtigen und Auslösen von Empfängern. Dies ist nützlich, " -"wenn der Senlin-API-Dienst hinter einem Proxy ausgeführt wird." - -msgid "The port number used to connect to docker daemon." -msgstr "" -"Die Portnummer, die für die Verbindung mit dem Docker-Dämon verwendet wird." - -msgid "The port on which the server will listen." -msgstr "Der Port, an dem der Server zuhören soll." - -msgid "The provided spec is not a map." -msgstr "Die angegebene Spezifikation ist keine Karte." - -#, python-format -msgid "" -"The requested operation '%(o)s' is not supported by the profile type '%(t)s'." -msgstr "" -"Die angeforderte Operation '%(o)s' wird vom Profiltyp '%(t)s' nicht " -"unterstützt." - -msgid "" -"The server could not comply with the request since it is either malformed or " -"otherwise incorrect." -msgstr "" -"Der Server konnte der Anforderung nicht entsprechen, da sie entweder " -"fehlerhaft oder auf andere Weise falsch ist." - -msgid "The server group policies." -msgstr "Die Servergruppenrichtlinien." - -#, python-format -msgid "The specified %(k)s '%(v)s' could not be found." -msgstr "Das angegebene %(k)s '%(v)s' konnte nicht gefunden werden." - -#, python-format -msgid "The specified %(k)s '%(v)s' is disabled" -msgstr "Das angegebene %(k)s '%(v)s' ist deaktiviert" - -#, python-format -msgid "The specified %(key)s '%(val)s' could not be found or is not unique." -msgstr "" -"Der angegebene %(key)s '%(val)s' konnte nicht gefunden werden oder ist nicht " -"eindeutig." - -#, python-format -msgid "The specified %(key)s '%(value)s' could not be found" -msgstr "Der angegebene %(key)s '%(value)s' konnte nicht gefunden werden" - -#, python-format -msgid "The specified %(key)s '%(value)s' could not be found." -msgstr "Der angegebene %(key)s '%(value)s' konnte nicht gefunden werden." - -#, python-format -msgid "" -"The specified max_size (%(m)s) is greater than the maximum number of nodes " -"allowed per cluster (%(mc)s)." 
-msgstr "" -"Die angegebene max_size (%(m)s) ist größer als die maximale Anzahl an Knoten " -"pro Cluster (%(mc)s)." - -#, python-format -msgid "" -"The specified max_size (%(m)s) is less than the current desired_capacity " -"(%(d)s) of the cluster." -msgstr "" -"Die angegebene max_size (%(m)s) ist kleiner als die aktuelle Sollkapazität " -"(%(d)s) des Clusters." - -#, python-format -msgid "" -"The specified max_size (%(m)s) is less than the current min_size (%(n)s) of " -"the cluster." -msgstr "" -"Die angegebene max_size (%(m)s) ist kleiner als die aktuelle min_size " -"(%(n)s) des Clusters." - -#, python-format -msgid "" -"The specified min_size (%(n)s) is greater than the current desired_capacity " -"(%(d)s) of the cluster." -msgstr "" -"Die angegebene min_size (%(n)s) ist größer als die aktuelle Sollkapazität " -"(%(d)s) des Clusters." - -#, python-format -msgid "" -"The specified min_size (%(n)s) is greater than the current max_size (%(m)s) " -"of the cluster." -msgstr "" -"Die angegebene min_size (%(n)s) ist größer als die aktuelle max_size (%(m)s " -"des Clusters." - -#, python-format -msgid "" -"The specified min_size (%(n)s) is greater than the specified max_size " -"(%(m)s)." -msgstr "" -"Die angegebene min_size (%(n)s) ist größer als die angegebene max_size " -"(%(m)s)." - -#, python-format -msgid "" -"The specified nodes %(n)s to be replaced are not members of the cluster " -"%(c)s." -msgstr "" -"Die angegebenen Knoten %(n)s, die ersetzt werden sollen, sind keine " -"Mitglieder des Clusters %(c)s." - -#, python-format -msgid "The specified regions '%(value)s' could not be found." -msgstr "Die angegebenen Regionen '%(value)s ' konnten nicht gefunden werden." - -#, python-format -msgid "The status of the port %(p)s must be DOWN" -msgstr "Der Status des Ports %(p)s muss DOWN sein" - -#, python-format -msgid "" -"The target capacity (%(d)s) is greater than the cluster's max_size (%(m)s)." -msgstr "" -"Die Zielkapazität (%(d)s) ist größer als die max_size des Clusters (%(m)s)." - -#, python-format -msgid "" -"The target capacity (%(d)s) is greater than the maximum number of nodes " -"allowed per cluster (%(m)s)." -msgstr "" -"Die Zielkapazität (%(d)s) ist größer als die maximale Anzahl der pro Cluster " -"zulässigen Knoten (%(m)s)." - -#, python-format -msgid "" -"The target capacity (%(d)s) is greater than the specified max_size (%(m)s)." -msgstr "" -"Die Zielkapazität (%(d)s) ist größer als die angegebene max_size (%(m)s)." - -#, python-format -msgid "" -"The target capacity (%(d)s) is less than the cluster's min_size (%(m)s)." -msgstr "" -"Die Zielkapazität (%(d)s) ist kleiner als die min_size des Clusters (%(m)s)." - -#, python-format -msgid "" -"The target capacity (%(d)s) is less than the specified min_size (%(m)s)." -msgstr "" -"Die Zielkapazität (%(d)s) ist kleiner als die angegebene min_size (%(m)s)." - -msgid "The target capacity (11) is greater than the specified max_size (10)." -msgstr "Die Zielkapazität (11) ist größer als die angegebene max_size (10)." - -msgid "The target host to evacuate the server." -msgstr "Der Zielhost, um den Server zu evakuieren." - -#, python-format -msgid "The trust for trustor '%(trustor)s' could not be found." -msgstr "Der Trust für Treugeber '%(trustor)s' konnte nicht gefunden werden." - -msgid "The ttl in seconds of a message posted to notification queue." -msgstr "" -"Der Wert in Sekunden für eine Nachricht, die an die " -"Benachrichtigungswarteschlange gesendet wurde." 
- -msgid "The type of probe sent by the loadbalancer to verify the member state." -msgstr "" -"Die Art der Sonde, die vom Loadbalancer gesendet wird, um den " -"Mitgliedszustand zu überprüfen." - -#, python-format -msgid "The value '%s' is not a valid Boolean" -msgstr "Der Wert '%s' ist kein gültiger boolescher Wert" - -#, python-format -msgid "The value '%s' is not a valid Integer" -msgstr "Der Wert '%s' ist keine gültige Ganzzahl" - -#, python-format -msgid "The value '%s' is not a valid number." -msgstr "Der Wert '%s' ist keine gültige Zahl." - -#, python-format -msgid "The value '%s' is not a valid string." -msgstr "Der Wert '%s' ist keine gültige Zeichenfolge." - -#, python-format -msgid "The value (%s) is not a valid JSON." -msgstr "Der Wert (%s) ist kein gültiger JSON." - -#, python-format -msgid "The value for %(attr)s is not a valid UUID: '%(value)s'." -msgstr "Der Wert für %(attr)s ist keine gültige UUID: '%(value)s'." - -#, python-format -msgid "The value for %(attr)s must be an integer: '%(value)s'." -msgstr "Der Wert für %(attr)s muss eine Ganzzahl sein: '%(value)s'." - -#, python-format -msgid "The value for the %(a)s field must be greater than or equal to %(n)d." -msgstr "Der Wert für das Feld %(a)s muss größer oder gleich %(n)d sein." - -#, python-format -msgid "The value for the %(a)s field must be less than or equal to %(n)d." -msgstr "Der Wert für das Feld %(a)s muss kleiner oder gleich %(n)d sein." - -#, python-format -msgid "" -"The value for the %(attr)s field must be at least %(count)d characters long." -msgstr "" -"Der Wert für das Feld %(attr)s muss mindestens %(count)d Zeichen lang sein." - -#, python-format -msgid "" -"The value for the %(attr)s field must be less than %(count)d characters long." -msgstr "" -"Der Wert für das Feld %(attr)s muss kleiner als %(count)d Zeichen lang sein." - -#, python-format -msgid "" -"The value for the '%(attr)s' (%(value)s) contains illegal characters. It " -"must contain only alphanumeric or \"_-.~\" characters and must start with " -"letter." -msgstr "" -"Der Wert für '%(attr)s' (%(value)s) enthält ungültige Zeichen. Es darf nur " -"alphanumerische oder '_-. ~' Zeichen enthalten und muss mit einem Buchstaben " -"beginnen." - -msgid "" -"The value for the socket option TCP_KEEPIDLE. This is the time in seconds " -"that the connection must be idle before TCP starts sending keepalive probes." -msgstr "" -"Der Wert für die Socketoption TCP_KEEPIDLE. Dies ist die Zeit in Sekunden, " -"in der die Verbindung inaktiv sein muss, bevor TCP mit dem Senden von " -"Keepalive-Tests beginnt." - -#, python-format -msgid "" -"The value of 'maximum' cannot be greater than the global constraint (%(m)d)." -msgstr "" -"Der Wert von 'Maximum' darf nicht größer sein als die globale Einschränkung " -"(%(m)d)." - -msgid "" -"The value of 'maximum' must be greater than or equal to that of the " -"'minimum' specified." -msgstr "" -"Der Wert von 'Maximum' muss größer oder gleich dem angegebenen 'Minimum' " -"sein." - -#, python-format -msgid "" -"The value of 'minimum' cannot be greater than the global constraint (%(m)d)." -msgstr "" -"Der Wert von 'Minimum' darf nicht größer sein als die globale Einschränkung " -"(%(m)d)." - -msgid "There is no feasible plan to handle all nodes." -msgstr "Es gibt keinen realisierbaren Plan, um alle Knoten zu behandeln." - -msgid "" -"Time in second to wait for loadbalancer to become ready after senlin " -"requests LBaaS V2 service for operations." 
-msgstr "" -"Zeit in Sekunden, um auf den Loadbalancer zu warten, nachdem senlin den " -"LBaaS V2-Dienst für Operationen angefordert hat." - -msgid "" -"Timeout for client connections' socket operations. If an incoming connection " -"is idle for this number of seconds it will be closed. A value of '0' " -"indicates waiting forever." -msgstr "" -"Timeout für Socket-Operationen von Clientverbindungen Wenn eine eingehende " -"Verbindung für diese Anzahl von Sekunden inaktiv ist, wird sie geschlossen. " -"Ein Wert von '0' zeigt an, dass auf ewig gewartet wird." - -msgid "Timeout in seconds for actions." -msgstr "Timeout in Sekunden für Aktionen" - -msgid "Topic" -msgstr "Thema" - -msgid "Type of adjustment when scaling is triggered." -msgstr "Art der Anpassung, wenn die Skalierung ausgelöst wird." - -#, python-format -msgid "Type of host node (%s) is not supported" -msgstr "Der Typ des Host-Knotens (%s) wird nicht unterstützt" - -msgid "Type of host node (wrong_type) is not supported" -msgstr "Der Typ des Host-Knotens (falscher_Typ) wird nicht unterstützt" - -msgid "Type of lifecycle hook" -msgstr "Art des Lebenszyklus-Hooks" - -msgid "Type of node failure detection." -msgstr "Art der Knotenfehlererkennung." - -msgid "Type of reboot which can be 'SOFT' or 'HARD'." -msgstr "Art des Neustarts, der 'SOFT' oder 'HARD' sein kann." - -msgid "Type of session persistence implementation." -msgstr "Art der Implementierung der Sitzungspersistenz" - -msgid "Type of the device(e.g. disk, cdrom, ...)." -msgstr "Typ des Gerätes (z.B. Disk, CD-ROM, ...)." - -msgid "Unable to determine the IP address of host node" -msgstr "Die IP-Adresse des Host-Knotens konnte nicht ermittelt werden" - -#, python-format -msgid "" -"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" -"Got: %(e)r" -msgstr "" -"%(app_name)s konnte nicht aus der Konfigurationsdatei %(conf_file)s geladen " -"werden. Bekam: %(e)r" - -msgid "Unable to locate config file" -msgstr "Die Konfigurationsdatei konnte nicht gefunden werden" - -msgid "Unlock the server." -msgstr "Entsperren Sie den Server." - -msgid "Unpause a container." -msgstr "Heben Sie einen Container auf." - -msgid "Unpause the server to running state." -msgstr "Hängen Sie den Server in den aktiven Zustand." - -#, python-format -msgid "Unrecognizable parameter '%s'" -msgstr "Nicht erkennbarer Parameter '%s'" - -#, python-format -msgid "Unrecognizable spec item '%s'" -msgstr "Nicht erkennbarer Objekttyp '%s'" - -#, python-format -msgid "Unrecognized action '%s' specified" -msgstr "Nicht erkannte Aktion '%s' angegeben" - -msgid "Unrescue the server." -msgstr "Entlasten Sie den Server." - -msgid "Unsupported action" -msgstr "Nicht unterstützte Aktion" - -#, python-format -msgid "Unsupported sort dir '%(value)s' for '%(attr)s'." -msgstr "Nicht unterstütztes Sortierziel '%(value)s' für '%(attr)s'." - -#, python-format -msgid "Unsupported sort key '%(value)s' for '%(attr)s'." -msgstr "Nicht unterstützter Sortierschlüssel '%(value)s' für '%(attr)ss'." - -msgid "Updated At" -msgstr "Aktualisiert am" - -msgid "Updating Nova server with image set to None is not supported by Nova" -msgstr "" -"Die Aktualisierung von Nova Server mit Image auf 'None' wird von Nova nicht " -"unterstützt" - -msgid "Updating a cluster in error state" -msgstr "Aktualisieren eines Clusters im Fehlerzustand" - -msgid "Url sink to which to send lifecycle hook message" -msgstr "URL-Senke, an die die Lifecycle-Hook-Nachricht gesendet werden soll" - -msgid "User data to be exposed by the metadata server." 
-msgstr "" -"Benutzerdaten, die vom Metadatenserver verfügbar gemacht werden sollen." - -msgid "VIP address and port of the pool." -msgstr "VIP-Adresse und Port des Pools." - -#, python-format -msgid "Value '%(value)s' is not acceptable for field '%(attr)s'." -msgstr "Der Wert '%(value)s' ist für das Feld '%(attr)s' nicht akzeptabel." - -#, python-format -msgid "Value for '%(attr)s' must have at least %(num)s item(s)." -msgstr "Der Wert für '%(attr)s' muss mindestens %(num)s Element(e) enthalten." - -#, python-format -msgid "Value must be >= 0 for field '%s'." -msgstr "Der Wert muss für das Feld '%s' >= 0 sein." - -#, python-format -msgid "" -"Version '%(req_ver)s' is not supported by the API. Minimum is '%(min_ver)s' " -"and maximum is '%(max_ver)s'." -msgstr "" -"Version '%(req_ver)s' wird von der API nicht unterstützt. Minimum ist " -"'%(min_ver)s' und Maximum ist '%(max_ver)s'." - -msgid "Version number of the policy type." -msgstr "Versionsnummer des Richtlinientyps" - -msgid "Version number of the profile type." -msgstr "Versionsnummer des Profiltyps" - -msgid "Volume destination type, must be 'volume' or 'local'" -msgstr "Datenträger-Zieltyp muss 'volume' oder 'local' sein" - -msgid "" -"Volume source type, must be one of 'image', 'snapshot', 'volume' or 'blank'" -msgstr "" -"Datenträger-Quellentyp, muss entweder 'image', 'snapshot', 'volume' oder " -"'blank' sein" - -msgid "Weight of the availability zone (default is 100)." -msgstr "Gewicht der Verfügbarkeitszone (Standard ist 100)." - -msgid "Weight of the region. The default is 100." -msgstr "Gewicht der Region. Der Standardwert ist 100." - -msgid "" -"When adjustment type is set to \"CHANGE_IN_PERCENTAGE\", this specifies the " -"cluster size will be decreased by at least this number of nodes." -msgstr "" -"Wenn der Anpassungstyp auf 'CHANGE_IN_PERCENTAGE' eingestellt ist, bedeutet " -"dies, dass die Clustergröße um mindestens diese Anzahl von Knoten verringert " -"wird." - -msgid "" -"When running server in SSL mode, you must specify both a cert_file and " -"key_file option value in your configuration file" -msgstr "" -"Wenn Sie den Server im SSL-Modus ausführen, müssen Sie in der " -"Konfigurationsdatei den Wert cert_file und den Wert für die Option key_file " -"angeben" - -msgid "" -"Whether a node should be completely destroyed after deletion. Default to True" -msgstr "" -"Ob ein Knoten nach dem Löschen vollständig zerstört werden soll. Standard " -"auf Wahr" - -msgid "Whether config drive should be enabled for the server." -msgstr "Ob das Konfigurationslaufwerk für den Server aktiviert werden soll." - -msgid "" -"Whether do best effort scaling when new size of cluster will break the size " -"limitation" -msgstr "" -"Ob Best-Effort-Skalierung, wenn die neue Clustergröße die Größenbeschränkung " -"überschreitet" - -msgid "" -"Whether the desired capacity of the cluster should be reduced along the " -"deletion. Default to True." -msgstr "" -"Ob die gewünschte Kapazität des Clusters entlang der Löschung reduziert " -"werden soll. Standard auf Wahr." - -msgid "Whether the disk partition is done automatically." -msgstr "Gibt an, ob die Festplattenpartition automatisch erstellt wird." - -msgid "Whether the evacuation should be a forced one." -msgstr "Ob die Evakuierung eine erzwungene sein sollte." - -msgid "Whether to delete the volume when the server stops." -msgstr "Gibt an, ob das Volume beim Stoppen des Servers gelöscht werden soll." - -msgid "You are not authenticated." -msgstr "Sie sind nicht authentifiziert." 
- -msgid "You are not authorized to complete this operation." -msgstr "Sie sind nicht berechtigt, diesen Vorgang abzuschließen." - -msgid "Zaqar queue to receive lifecycle hook message" -msgstr "Zaqar-Warteschlange, um Lifecycle-Hook-Nachricht zu erhalten" - -#, python-format -msgid "a cluster named '%s' already exists." -msgstr "Ein Cluster namens '%s' existiert bereits." - -msgid "a cluster named 'CLUSTER' already exists." -msgstr "Ein Cluster mit dem Namen 'CLUSTER' existiert bereits." - -#, python-format -msgid "environment has unknown section \"%s\"" -msgstr "Umgebung hat unbekannten Abschnitt '%s'" - -msgid "lifecycle hook parameters saved" -msgstr "Lifecycle-Hook-Parameter gespeichert" - -#, python-format -msgid "" -"nodes %s are depended by other nodes, so can't be deleted or become orphan " -"nodes" -msgstr "" -"Knoten %s sind von anderen Knoten abhängig und können daher nicht gelöscht " -"oder zu verwaisten Knoten werden" - -msgid "" -"nodes ['NODE1'] are depended by other nodes, so can't be deleted or become " -"orphan nodes" -msgstr "" -"Knoten ['NODE1'] sind von anderen Knoten abhängig und können daher nicht " -"gelöscht oder zu verwaisten Knoten werden" - -msgid "server doesn't have an image and it has no bootable volume" -msgstr "Der Server hat kein Abbild und keinen bootfähigen Datenträger" - -msgid "still attached to some clusters" -msgstr "immer noch an einige Cluster angehängt" - -msgid "still depended by other clusters and/or nodes" -msgstr "immer noch von anderen Clustern und/oder Knoten abhängig" - -msgid "still in one of WAITING, RUNNING or SUSPENDED state" -msgstr "immer noch in einem Zustand WAITING, RUNNING oder SUSPENDED" - -#, python-format -msgid "still referenced by profile(s): %s" -msgstr "immer noch nach Profil(en) referenziert: %s" - -msgid "still referenced by some clusters and/or nodes." -msgstr "wird immer noch von einigen Clustern und / oder Knoten referenziert." - -msgid "the 'cooldown' for 'adjustment' must be >= 0" -msgstr "Die 'Abklingzeit' für 'Anpassung' muss >= 0 sein" - -msgid "the 'min_step' for 'adjustment' must be >= 0" -msgstr "der 'min_step' für 'adjustment' muss >= 0 sein" - -msgid "the 'number' for 'adjustment' must be > 0" -msgstr "Die 'Nummer' für 'Anpassung' muss > 0 sein" - -#, python-format -msgid "the floating IP %s has been used." -msgstr "die Floating IP %s wurde verwendet." diff --git a/senlin/objects/__init__.py b/senlin/objects/__init__.py deleted file mode 100644 index fb2716e51..000000000 --- a/senlin/objects/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# When objects are registered, an attribute is set on this module -# automatically, pointing to the latest version of the object. - - -def register_all(): - # Objects should be imported here in order to be registered by services - # that may need to receive it via RPC. 
- __import__('senlin.objects.action') - __import__('senlin.objects.cluster') - __import__('senlin.objects.cluster_lock') - __import__('senlin.objects.cluster_policy') - __import__('senlin.objects.credential') - __import__('senlin.objects.dependency') - __import__('senlin.objects.event') - __import__('senlin.objects.health_registry') - __import__('senlin.objects.node') - __import__('senlin.objects.node_lock') - __import__('senlin.objects.notification') - __import__('senlin.objects.policy') - __import__('senlin.objects.profile') - __import__('senlin.objects.receiver') - __import__('senlin.objects.requests.actions') - __import__('senlin.objects.requests.build_info') - __import__('senlin.objects.requests.clusters') - __import__('senlin.objects.requests.cluster_policies') - __import__('senlin.objects.requests.credentials') - __import__('senlin.objects.requests.events') - __import__('senlin.objects.requests.nodes') - __import__('senlin.objects.requests.policies') - __import__('senlin.objects.requests.policy_type') - __import__('senlin.objects.requests.profiles') - __import__('senlin.objects.requests.profile_type') - __import__('senlin.objects.requests.receivers') - __import__('senlin.objects.requests.webhooks') - __import__('senlin.objects.service') diff --git a/senlin/objects/action.py b/senlin/objects/action.py deleted file mode 100644 index 8aa2b6e83..000000000 --- a/senlin/objects/action.py +++ /dev/null @@ -1,216 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Action object.""" - -from oslo_utils import uuidutils - -from senlin.common import exception -from senlin.common import utils -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Action(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin action object.""" - - fields = { - 'id': fields.UUIDField(), - 'created_at': fields.DateTimeField(), - 'updated_at': fields.DateTimeField(nullable=True), - 'name': fields.StringField(), - 'cluster_id': fields.StringField(), - 'context': fields.JsonField(), - 'target': fields.UUIDField(), - 'action': fields.StringField(), - 'cause': fields.StringField(), - 'owner': fields.UUIDField(nullable=True), - 'interval': fields.IntegerField(nullable=True), - 'start_time': fields.FloatField(nullable=True), - 'end_time': fields.FloatField(nullable=True), - 'timeout': fields.IntegerField(nullable=True), - 'status': fields.StringField(), - 'status_reason': fields.StringField(nullable=True), - 'control': fields.StringField(nullable=True), - 'inputs': fields.JsonField(nullable=True), - 'outputs': fields.JsonField(nullable=True), - 'data': fields.JsonField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'domain': fields.StringField(nullable=True), - 'dep_on': fields.CustomListField(attr_name='depended', nullable=True), - 'dep_by': fields.CustomListField(attr_name='dependent', nullable=True), - } - - @classmethod - def create(cls, context, values): - obj = db_api.action_create(context, values) - return cls._from_db_object(context, cls(context), obj) - - @classmethod - def find(cls, context, identity, **kwargs): - """Find an action with the given identity. - - :param context: An instance of the request context. - :param identity: The UUID, name or short-id of an action. - :param dict kwargs: Other query parameters. - :return: A DB object of action or an exception `ResourceNotFound` if - no matching action is found. 
- """ - if uuidutils.is_uuid_like(identity): - action = cls.get(context, identity, **kwargs) - if not action: - action = cls.get_by_name(context, identity, **kwargs) - else: - action = cls.get_by_name(context, identity, **kwargs) - if not action: - action = cls.get_by_short_id(context, identity, **kwargs) - - if not action: - raise exception.ResourceNotFound(type='action', id=identity) - - return action - - @classmethod - def get(cls, context, action_id, **kwargs): - obj = db_api.action_get(context, action_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_name(cls, context, name, **kwargs): - obj = db_api.action_get_by_name(context, name, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_short_id(cls, context, short_id, **kwargs): - obj = db_api.action_get_by_short_id(context, short_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def action_list_active_scaling(cls, context, cluster_id, **kwargs): - objs = db_api.action_list_active_scaling(context, cluster_id, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def get_all(cls, context, **kwargs): - objs = db_api.action_get_all(context, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def get_all_by_owner(cls, context, owner): - objs = db_api.action_get_all_by_owner(context, owner) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def get_all_active_by_target(cls, context, target): - objs = db_api.action_get_all_active_by_target(context, target) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def check_status(cls, context, action_id, timestamp): - return db_api.action_check_status(context, action_id, timestamp) - - @classmethod - def mark_succeeded(cls, context, action_id, timestamp): - return db_api.action_mark_succeeded(context, action_id, timestamp) - - @classmethod - def mark_ready(cls, context, action_id, timestamp): - return db_api.action_mark_ready(context, action_id, timestamp) - - @classmethod - def mark_failed(cls, context, action_id, timestamp, reason=None): - return db_api.action_mark_failed(context, action_id, timestamp, reason) - - @classmethod - def mark_cancelled(cls, context, action_id, timestamp): - return db_api.action_mark_cancelled(context, action_id, timestamp) - - @classmethod - def acquire(cls, context, action_id, owner, timestamp): - return db_api.action_acquire(context, action_id, owner, timestamp) - - @classmethod - def acquire_random_ready(cls, context, owner, timestamp): - return db_api.action_acquire_random_ready(context, owner, timestamp) - - @classmethod - def acquire_first_ready(cls, context, owner, timestamp): - return db_api.action_acquire_first_ready(context, owner, timestamp) - - @classmethod - def abandon(cls, context, action_id, values=None): - return db_api.action_abandon(context, action_id, values) - - @classmethod - def signal(cls, context, action_id, value): - return db_api.action_signal(context, action_id, value) - - @classmethod - def signal_query(cls, context, action_id): - return db_api.action_signal_query(context, action_id) - - @classmethod - def lock_check(cls, context, action_id, owner=None): - return db_api.action_lock_check(context, action_id, owner) - - @classmethod - def update(cls, context, action_id, values): - return db_api.action_update(context, action_id, values) - - @classmethod - def delete(cls, 
context, action_id): - db_api.action_delete(context, action_id) - - @classmethod - def delete_by_target(cls, context, target, action=None, - action_excluded=None, status=None): - """Delete an action with the target and other given params. - - :param target: The ID of the target cluster/node - :param action: A list of actions to be included. - :param action_excluded: A list of actions to be excluded. - :param status: A list of statuses to be delete filtered. - :return: None. - """ - return db_api.action_delete_by_target(context, target, action=action, - action_excluded=action_excluded, - status=status) - - def to_dict(self): - action_dict = { - 'id': self.id, - 'name': self.name, - 'cluster_id': self.cluster_id, - 'action': self.action, - 'target': self.target, - 'cause': self.cause, - 'owner': self.owner, - 'interval': self.interval, - 'start_time': self.start_time, - 'end_time': self.end_time, - 'timeout': self.timeout, - 'status': self.status, - 'status_reason': self.status_reason, - 'inputs': self.inputs, - 'outputs': self.outputs, - 'depends_on': self.dep_on, - 'depended_by': self.dep_by, - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at), - 'data': self.data, - 'user': self.user, - 'project': self.project, - } - return action_dict diff --git a/senlin/objects/base.py b/senlin/objects/base.py deleted file mode 100644 index 24b73cbf1..000000000 --- a/senlin/objects/base.py +++ /dev/null @@ -1,160 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Senlin common internal object model""" -import re - -from oslo_utils import versionutils -from oslo_versionedobjects import base -from oslo_versionedobjects import fields as base_fields - -from senlin.common.i18n import _ -from senlin import objects - -VersionedObjectDictCompat = base.VersionedObjectDictCompat -VersionedObjectSerializer = base.VersionedObjectSerializer - - -class SenlinObject(base.VersionedObject): - """Base class for senlin objects. - - This is the base class for all objects that can be remoted or instantiated - via RPC. Simply defining a sub-class of this class would make it remotely - instantiatable. Objects should implement the "get" class method and the - "save" object method. - """ - OBJ_SERIAL_NAMESPACE = 'senlin_object' - OBJ_PROJECT_NAMESPACE = 'senlin' - BASE_VERSION = '1.0' - VERSION = '1.0' - - # list of version maps from api request version to object version - # higher api versions after lower api versions. e.g. 
- # {'1.2': '1.0', '1.4': '1.1'} - VERSION_MAP = {} - - @staticmethod - def _from_db_object(context, obj, db_obj): - if db_obj is None: - return None - for field in obj.fields: - if field == 'metadata': - obj['metadata'] = db_obj['meta_data'] - else: - obj[field] = db_obj[field] - - obj._context = context - obj.obj_reset_changes() - - return obj - - @staticmethod - def _transpose_metadata(values): - """Utility function to translate metadata field.""" - if 'metadata' in values: - value = values.pop('metadata') - values['meta_data'] = value - return values - - @classmethod - def to_json_schema(cls): - obj_name = cls.obj_name() - schema = { - '$schema': 'http://json-schema.org/draft-04/schema#', - 'title': obj_name, - } - - schema.update(base_fields.Object(obj_name).get_schema()) - dataf = cls.OBJ_SERIAL_NAMESPACE + ".data" - schema["properties"][dataf]["additionalProperties"] = False - return schema - - @classmethod - def obj_class_from_name(cls, objname, objver=None): - if objver is None: - objver = cls.VERSION - return super(SenlinObject, cls).obj_class_from_name(objname, objver) - - @classmethod - def find_version(cls, context): - match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", context.api_version) - req_major = int(match.group(1)) - req_minor = int(match.group(2)) - # base version is '1.0' - matched_version = cls.BASE_VERSION - for api_ver in sorted(cls.VERSION_MAP.keys()): - match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", api_ver) - api_major = int(match.group(1)) - api_minor = int(match.group(2)) - if (api_major, api_minor) <= (req_major, req_minor): - matched_version = cls.VERSION_MAP[api_ver] - else: - break - - return matched_version - - @classmethod - def normalize_req(cls, name, req, key=None): - result = { - cls.OBJ_SERIAL_NAMESPACE + '.version': cls.VERSION, - cls.OBJ_SERIAL_NAMESPACE + '.namespace': cls.OBJ_PROJECT_NAMESPACE, - cls.OBJ_SERIAL_NAMESPACE + '.name': name, - } - if key is not None: - if key not in req: - raise ValueError(_("Request body missing '%s' key.") % key) - - result[cls.OBJ_SERIAL_NAMESPACE + '.data'] = { - key: { - cls.OBJ_SERIAL_NAMESPACE + '.version': cls.VERSION, - cls.OBJ_SERIAL_NAMESPACE + '.namespace': - cls.OBJ_PROJECT_NAMESPACE, - cls.OBJ_SERIAL_NAMESPACE + '.name': name + 'Body', - cls.OBJ_SERIAL_NAMESPACE + '.data': req[key] - } - } - else: - result[cls.OBJ_SERIAL_NAMESPACE + '.data'] = req - - return result - - -class SenlinObjectRegistry(base.VersionedObjectRegistry): - - notification_classes = [] - - def registration_hook(self, cls, index): - """Callback for object registration. - - When an object is registered, this function will be called for - maintaining senlin.objects.$OBJECT as the highest-versioned - implementation of a given object. - """ - version = versionutils.convert_version_to_tuple(cls.VERSION) - if not hasattr(objects, cls.obj_name()): - setattr(objects, cls.obj_name(), cls) - else: - curr_version = versionutils.convert_version_to_tuple( - getattr(objects, cls.obj_name()).VERSION) - if version >= curr_version: - setattr(objects, cls.obj_name(), cls) - - @classmethod - def register_notification(cls, notification_cls): - """Register a class as concrete notification. - - This is used only to register concrete notification or payload - classes. Do NOT register base classes intended for inheritance only. 
- """ - cls.register_if(False)(notification_cls) - cls.notification_classes.append(notification_cls) - return notification_cls diff --git a/senlin/objects/cluster.py b/senlin/objects/cluster.py deleted file mode 100644 index 0eda3d4fd..000000000 --- a/senlin/objects/cluster.py +++ /dev/null @@ -1,163 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Cluster object.""" - -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from senlin.common import exception as exc -from senlin.common import utils -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Cluster(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin cluster object.""" - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'profile_id': fields.UUIDField(), - 'parent': fields.UUIDField(nullable=True), - 'init_at': fields.DateTimeField(), - 'created_at': fields.DateTimeField(nullable=True), - 'updated_at': fields.DateTimeField(nullable=True), - 'min_size': fields.IntegerField(nullable=True), - 'max_size': fields.IntegerField(nullable=True), - 'desired_capacity': fields.IntegerField(nullable=True), - 'next_index': fields.IntegerField(nullable=True), - 'timeout': fields.IntegerField(nullable=True), - 'status': fields.StringField(), - 'status_reason': fields.StringField(nullable=True), - 'metadata': fields.JsonField(nullable=True), - 'data': fields.JsonField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'domain': fields.StringField(nullable=True), - 'dependents': fields.JsonField(nullable=True), - 'config': fields.JsonField(nullable=True), - 'profile_name': fields.StringField(), - 'nodes': fields.CustomListField(attr_name='id', nullable=True), - 'policies': fields.CustomListField(attr_name='id', nullable=True), - } - - @staticmethod - def _from_db_object(context, obj, db_obj): - if db_obj is None: - return None - for field in obj.fields: - if field == 'metadata': - obj['metadata'] = db_obj['meta_data'] - elif field == 'profile_name': - obj['profile_name'] = db_obj['profile'].name - else: - obj[field] = db_obj[field] - - obj._context = context - obj.obj_reset_changes() - - return obj - - @classmethod - def create(cls, context, values): - values = cls._transpose_metadata(values) - values['init_at'] = timeutils.utcnow(True) - obj = db_api.cluster_create(context, values) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def find(cls, context, identity, project_safe=True): - cluster = None - if uuidutils.is_uuid_like(identity): - cluster = cls.get(context, identity, project_safe=project_safe) - if not cluster: - cluster = cls.get_by_name(context, identity, - project_safe=project_safe) - else: - cluster = cls.get_by_name(context, identity, - project_safe=project_safe) - # maybe it is a short form of UUID - if not cluster: - cluster = cls.get_by_short_id(context, identity, - project_safe=project_safe) - - if not cluster: - raise 
exc.ResourceNotFound(type='cluster', id=identity) - - return cluster - - @classmethod - def get(cls, context, cluster_id, **kwargs): - obj = db_api.cluster_get(context, cluster_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_name(cls, context, name, **kwargs): - obj = db_api.cluster_get_by_name(context, name, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_short_id(cls, context, short_id, **kwargs): - obj = db_api.cluster_get_by_short_id(context, short_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_all(cls, context, **kwargs): - objs = db_api.cluster_get_all(context, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def get_next_index(cls, context, cluster_id): - return db_api.cluster_next_index(context, cluster_id) - - @classmethod - def count_all(cls, context, **kwargs): - return db_api.cluster_count_all(context, **kwargs) - - @classmethod - def update(cls, context, obj_id, values): - values = cls._transpose_metadata(values) - values['updated_at'] = timeutils.utcnow(True) - return db_api.cluster_update(context, obj_id, values) - - @classmethod - def delete(cls, context, obj_id): - db_api.cluster_delete(context, obj_id) - - def to_dict(self): - return { - 'id': self.id, - 'name': self.name, - 'profile_id': self.profile_id, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'init_at': utils.isotime(self.init_at), - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at), - 'min_size': self.min_size, - 'max_size': self.max_size, - 'desired_capacity': self.desired_capacity, - 'timeout': self.timeout, - 'status': self.status, - 'status_reason': self.status_reason, - 'metadata': self.metadata or {}, - 'data': self.data or {}, - 'dependents': self.dependents or {}, - 'config': self.config or {}, - 'profile_name': self.profile_name, - 'nodes': self.nodes, - 'policies': self.policies - } diff --git a/senlin/objects/cluster_lock.py b/senlin/objects/cluster_lock.py deleted file mode 100644 index 2f3cbdbad..000000000 --- a/senlin/objects/cluster_lock.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
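# [Editor's sketch: added for illustration, not part of the original
# patch.] Cluster.find above resolves an identity by attempting a UUID
# lookup first, then a name lookup, then a short-id prefix lookup. The
# same cascade in isolation, with hypothetical callables standing in
# for the db_api queries:

from oslo_utils import uuidutils


def find_resource(identity, by_id, by_name, by_short_id):
    """Resolve identity via id -> name -> short-id, as Cluster.find does."""
    if uuidutils.is_uuid_like(identity):
        found = by_id(identity) or by_name(identity)
    else:
        # a non-UUID string may still be the short form of an id
        found = by_name(identity) or by_short_id(identity)
    if found is None:
        raise LookupError("resource '%s' not found" % identity)
    return found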
- -"""Cluster lock object.""" - -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class ClusterLock(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin cluster lock object.""" - - fields = { - 'cluster_id': fields.UUIDField(), - 'action_ids': fields.ListOfStringsField(), - 'semaphore': fields.IntegerField(), - } - - @classmethod - def acquire(cls, cluster_id, action_id, scope): - return db_api.cluster_lock_acquire(cluster_id, action_id, scope) - - @classmethod - def is_locked(cls, cluster_id): - return db_api.cluster_is_locked(cluster_id) - - @classmethod - def release(cls, cluster_id, action_id, scope): - return db_api.cluster_lock_release(cluster_id, action_id, scope) - - @classmethod - def steal(cls, cluster_id, action_id): - return db_api.cluster_lock_steal(cluster_id, action_id) diff --git a/senlin/objects/cluster_policy.py b/senlin/objects/cluster_policy.py deleted file mode 100644 index 79faf7d78..000000000 --- a/senlin/objects/cluster_policy.py +++ /dev/null @@ -1,101 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Cluster-policy binding object.""" - -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import cluster as cluster_obj -from senlin.objects import fields -from senlin.objects import policy as policy_obj - - -@base.SenlinObjectRegistry.register -class ClusterPolicy(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin cluster-policy binding object.""" - - fields = { - 'id': fields.UUIDField(), - 'cluster_id': fields.UUIDField(), - 'policy_id': fields.UUIDField(), - 'cluster': fields.ObjectField('Cluster', nullable=True), - 'policy': fields.ObjectField('Policy', nullable=True), - 'enabled': fields.BooleanField(), - 'priority': fields.IntegerField(), - 'data': fields.JsonField(nullable=True), - 'last_op': fields.DateTimeField(nullable=True), - } - - @staticmethod - def _from_db_object(context, binding, db_obj): - if db_obj is None: - return None - for field in binding.fields: - if field == 'cluster': - c = cluster_obj.Cluster.get(context, db_obj['cluster_id']) - binding['cluster'] = c - elif field == 'policy': - p = policy_obj.Policy.get(context, db_obj['policy_id']) - binding['policy'] = p - else: - binding[field] = db_obj[field] - - binding._context = context - binding.obj_reset_changes() - - return binding - - @classmethod - def create(cls, context, cluster_id, policy_id, values): - obj = db_api.cluster_policy_attach(context, cluster_id, policy_id, - values) - return cls._from_db_object(context, cls(context), obj) - - @classmethod - def get(cls, context, cluster_id, policy_id): - obj = db_api.cluster_policy_get(context, cluster_id, policy_id) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_type(cls, context, cluster_id, policy_type, filters=None): - objs = db_api.cluster_policy_get_by_type(context, cluster_id, - policy_type, filters=filters) - return 
[cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def get_all(cls, context, cluster_id, **kwargs): - objs = db_api.cluster_policy_get_all(context, cluster_id, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def update(cls, context, cluster_id, policy_id, values): - db_api.cluster_policy_update(context, cluster_id, policy_id, values) - - @classmethod - def delete(cls, context, cluster_id, policy_id): - db_api.cluster_policy_detach(context, cluster_id, policy_id) - - def to_dict(self): - binding_dict = { - 'id': self.id, - 'cluster_id': self.cluster.id, - 'policy_id': self.policy.id, - 'enabled': self.enabled, - 'data': self.data, - 'last_op': self.last_op, - 'priority': self.priority, - # below are derived data for user's convenience - 'cluster_name': self.cluster.name, - 'policy_name': self.policy.name, - 'policy_type': self.policy.type, - } - return binding_dict diff --git a/senlin/objects/credential.py b/senlin/objects/credential.py deleted file mode 100644 index 483d29823..000000000 --- a/senlin/objects/credential.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Credential object.""" - -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Credential(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin credential object.""" - - fields = { - 'user': fields.StringField(), - 'project': fields.StringField(), - 'cred': fields.JsonField(), - 'data': fields.JsonField(nullable=True), - } - - @classmethod - def create(cls, context, values): - obj = db_api.cred_create(context, values) - return cls._from_db_object(context, cls(context), obj) - - @classmethod - def get(cls, context, user, project): - obj = db_api.cred_get(context, user, project) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def update(cls, context, user, project, values): - obj = db_api.cred_update(context, user, project, values) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def delete(cls, context, user, project): - return db_api.cred_delete(context, user, project) - - @classmethod - def update_or_create(cls, context, values): - obj = db_api.cred_create_update(context, values) - return cls._from_db_object(context, cls(), obj) diff --git a/senlin/objects/dependency.py b/senlin/objects/dependency.py deleted file mode 100644 index 7186c8746..000000000 --- a/senlin/objects/dependency.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Action dependency object.""" - -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Dependency(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin action dependency object.""" - - fields = { - 'id': fields.UUIDField(), - 'depended': fields.UUIDField(), - 'dependent': fields.UUIDField(), - } - - @classmethod - def create(cls, context, depended, dependent): - return db_api.dependency_add(context, depended, dependent) - - @classmethod - def get_depended(cls, context, action_id): - return db_api.dependency_get_depended(context, action_id) - - @classmethod - def get_dependents(cls, context, action_id): - return db_api.dependency_get_dependents(context, action_id) diff --git a/senlin/objects/event.py b/senlin/objects/event.py deleted file mode 100644 index 83ba3260a..000000000 --- a/senlin/objects/event.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Event object.""" - -from oslo_utils import uuidutils - -from senlin.common import exception -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Event(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin event object.""" - - fields = { - 'id': fields.UUIDField(), - 'timestamp': fields.DateTimeField(), - 'oid': fields.UUIDField(), - 'oname': fields.StringField(), - 'otype': fields.StringField(), - 'cluster_id': fields.StringField(nullable=True), - 'level': fields.StringField(), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'action': fields.StringField(nullable=True), - 'status': fields.StringField(), - 'status_reason': fields.StringField(), - 'metadata': fields.JsonField(nullable=True), - } - - @classmethod - def create(cls, context, values): - obj = db_api.event_create(context, values) - return cls._from_db_object(context, cls(context), obj) - - @classmethod - def find(cls, context, identity, **kwargs): - """Find an event with the given identity. - - :param context: An instance of the request context. - :param identity: The UUID, name or short-id of the event. - :param dict kwargs: Other keyword query parameters. - - :return: A dictionary containing the details of the event. 
- """ - event = None - if uuidutils.is_uuid_like(identity): - event = cls.get(context, identity, **kwargs) - if not event: - event = cls.get_by_short_id(context, identity, **kwargs) - if not event: - raise exception.ResourceNotFound(type='event', id=identity) - - return event - - @classmethod - def get(cls, context, event_id, **kwargs): - return db_api.event_get(context, event_id, **kwargs) - - @classmethod - def get_by_short_id(cls, context, short_id, **kwargs): - return db_api.event_get_by_short_id(context, short_id, **kwargs) - - @classmethod - def get_all(cls, context, **kwargs): - return db_api.event_get_all(context, **kwargs) - - @classmethod - def count_by_cluster(cls, context, cluster_id, **kwargs): - return db_api.event_count_by_cluster(context, cluster_id, **kwargs) - - @classmethod - def get_all_by_cluster(cls, context, cluster_id, **kwargs): - objs = db_api.event_get_all_by_cluster(context, cluster_id, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] diff --git a/senlin/objects/fields.py b/senlin/objects/fields.py deleted file mode 100644 index 1c4777590..000000000 --- a/senlin/objects/fields.py +++ /dev/null @@ -1,534 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import strutils -from oslo_utils import uuidutils -from oslo_versionedobjects import fields -import re - -from senlin.common import consts -from senlin.common.i18n import _ - -CONF = cfg.CONF - -# Field alias for code readability -# BooleanField = fields.BooleanField -FlexibleBooleanField = fields.FlexibleBooleanField -StringField = fields.StringField -IntegerField = fields.IntegerField -FloatField = fields.FloatField -UUIDField = fields.UUIDField -DateTimeField = fields.DateTimeField -DictOfStringsField = fields.DictOfStringsField -ListOfStringsField = fields.ListOfStringsField -ListOfEnumField = fields.ListOfEnumField - - -class Boolean(fields.FieldType): - # NOTE: The following definition is much more stricter than the oslo - # version. Also note that the treatment of default values here: - # we are using the user specified default value when invoking - # the 'bool_from_string' until function. - - def __init__(self, default=False): - super(Boolean, self).__init__() - self._default = default - - def coerce(self, obj, attr, value): - return strutils.bool_from_string(value, strict=True, - default=self._default) - - def get_schema(self): - return {'type': ['boolean']} - - -class NonNegativeInteger(fields.FieldType): - # NOTE: This definition is kept because we want the error message from - # 'int' conversion to be user friendly. 
- @staticmethod - def coerce(obj, attr, value): - try: - v = int(value) - except (TypeError, ValueError): - raise ValueError(_("The value for %(attr)s must be an integer: " - "'%(value)s'.") % - {'attr': attr, 'value': value}) - if v < 0: - err = _("Value must be >= 0 for field '%s'.") % attr - raise ValueError(err) - return v - - def get_schema(self): - return { - 'type': ['integer', 'string'], - 'minimum': 0 - } - - -# Senlin has stricter field checking for object fields. -class Object(fields.Object): - - def get_schema(self): - schema = super(Object, self).get_schema() - # we are not checking whether self._obj_name is registered, an - # exception will be raised anyway if it is not registered. - data_key = 'senlin_object.data' - schema['properties'][data_key]['additionalProperties'] = False - return schema - - -class UUID(fields.FieldType): - - _PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]' - r'{4}-?[a-fA-F0-9]{12}$') - - @staticmethod - def coerce(obj, attr, value): - if not uuidutils.is_uuid_like(value): - msg = _("The value for %(attr)s is not a valid UUID: '%(value)s'." - ) % {'attr': attr, 'value': value} - raise ValueError(msg) - - return str(value) - - def get_schema(self): - return {'type': ['string'], 'pattern': self._PATTERN} - - -class Json(fields.FieldType): - def coerce(self, obj, attr, value): - if isinstance(value, str): - try: - return jsonutils.loads(value) - except ValueError: - msg = _("The value (%s) is not a valid JSON.") % value - raise ValueError(msg) - return value - - def from_primitive(self, obj, attr, value): - return self.coerce(obj, attr, value) - - def to_primitive(self, obj, attr, value): - return jsonutils.dumps(value) - - def stringify(self, value): - if isinstance(value, str): - try: - return jsonutils.loads(value) - except ValueError: - raise - return str(value) - - def get_schema(self): - return {'type': ['object']} - - -class NotificationPriority(fields.Enum): - - # The priorities here are derived from oslo_messaging.notify.notifier - ALL = consts.NOTIFICATION_PRIORITIES - - def __init__(self): - super(NotificationPriority, self).__init__(self.ALL) - - -class NotificationPhase(fields.Enum): - - ALL = consts.NOTIFICATION_PHASES - - def __init__(self): - super(NotificationPhase, self).__init__(self.ALL) - - -class Name(fields.String): - - def __init__(self, min_len=1, max_len=255): - super(Name, self).__init__() - self.min_len = min_len - self.max_len = max_len - - def coerce(self, obj, attr, value): - err = None - if len(value) < self.min_len: - err = _("The value for the %(attr)s field must be at least " - "%(count)d characters long." - ) % {'attr': attr, 'count': self.min_len} - elif len(value) > self.max_len: - err = _("The value for the %(attr)s field must be less than " - "%(count)d characters long." - ) % {'attr': attr, 'count': self.max_len} - else: - # NOTE: This is pretty restrictive. We can relax it later when - # there are requests to do so - regex = re.compile(u'^[a-zA-Z\u4e00-\u9fa5\d\.\_\~-]*$', - re.IGNORECASE) - if not regex.search(value): - err = _("The value for the '%(attr)s' (%(value)s) contains " - "illegal characters. It must contain only " - "alphanumeric or \"_-.~\" characters and must start " - "with a letter."
- ) % {'attr': attr, 'value': value} - - if err: - raise ValueError(err) - - return super(Name, self).coerce(obj, attr, value) - - def get_schema(self): - return { - 'type': ['string'], - 'minLength': self.min_len, - 'maxLength': self.max_len - } - - -class Capacity(fields.Integer): - - def __init__(self, minimum=0, maximum=None): - super(Capacity, self).__init__() - CONF.import_opt("max_nodes_per_cluster", "senlin.conf") - - if minimum > CONF.max_nodes_per_cluster: - err = _("The value of 'minimum' cannot be greater than the global " - "constraint (%(m)d).") % {'m': CONF.max_nodes_per_cluster} - raise ValueError(err) - self.minimum = minimum - - if maximum is not None: - if maximum < minimum: - err = _("The value of 'maximum' must be greater than or equal " - "to that of the 'minimum' specified.") - raise ValueError(err) - - if maximum > CONF.max_nodes_per_cluster: - err = _("The value of 'maximum' cannot be greater than the " - "global constraint (%(m)d)." - ) % {'m': CONF.max_nodes_per_cluster} - raise ValueError(err) - - self.maximum = maximum - else: - self.maximum = CONF.max_nodes_per_cluster - - def coerce(self, obj, attr, value): - try: - v = int(value) - except Exception: - raise ValueError(_("The value for %(attr)s must be an integer: " - "'%(value)s'.") % - {'attr': attr, 'value': value}) - if v < self.minimum: - raise ValueError(_("The value for the %(a)s field must be greater " - "than or equal to %(n)d.") % - {'a': attr, 'n': self.minimum}) - elif v > self.maximum: - raise ValueError(_("The value for the %(a)s field must be less " - "than or equal to %(n)d.") % - {'a': attr, 'n': self.maximum}) - return super(Capacity, self).coerce(obj, attr, v) - - def get_schema(self): - return { - 'type': ['integer', 'string'], - 'minimum': self.minimum, - 'maximum': self.maximum, - 'pattern': '^[0-9]*$', - } - - -class Sort(fields.String): - - def __init__(self, valid_keys): - super(Sort, self).__init__() - self.valid_keys = valid_keys - - def coerce(self, obj, attr, value): - for s in value.split(','): - s_key, _sep, s_dir = s.partition(':') - err = None - if not s_key: - err = _("Missing sort key for '%s'.") % attr - raise ValueError(err) - - if s_key not in self.valid_keys: - err = _("Unsupported sort key '%(value)s' for '%(attr)s'." - ) % {'attr': attr, 'value': s_key} - - if s_dir and s_dir not in ('asc', 'desc'): - err = _("Unsupported sort dir '%(value)s' for '%(attr)s'." 
) % {'attr': attr, 'value': s_dir} - - if err: - raise ValueError(err) - - return super(Sort, self).coerce(obj, attr, value) - - def get_schema(self): - return { - 'type': ['string'], - } - - -class IdentityList(fields.List): - - def __init__(self, element_type, min_items=0, unique=True, nullable=False, - **kwargs): - super(IdentityList, self).__init__(element_type, **kwargs) - self.min_items = min_items - self.unique_items = unique - self.nullable = nullable - - def coerce(self, obj, attr, value): - res = super(IdentityList, self).coerce(obj, attr, value) - if len(res) < self.min_items: - raise ValueError(_("Value for '%(attr)s' must have at least " - "%(num)s item(s).") % - {'attr': attr, 'num': self.min_items}) - if len(set(res)) != len(res) and self.unique_items: - raise ValueError(_("Items for '%(attr)s' must be unique") % - {'attr': attr}) - return res - - def get_schema(self): - schema = super(IdentityList, self).get_schema() - if self.nullable: - schema['type'].append('null') - schema['minItems'] = self.min_items - schema['uniqueItems'] = self.unique_items - return schema - - -class BaseEnum(fields.FieldType): - # NOTE: We are not basing Enum on String because String is not working - # correctly when handling None values. - def __init__(self, nullable=False): - valid_values = list(self.__class__.ALL) - if not valid_values: - raise ValueError(_("No list of valid values provided for enum.")) - - for value in valid_values: - if not isinstance(value, str): - raise ValueError(_("Enum field only supports string values.")) - - self._valid_values = list(valid_values) - self._nullable = nullable - super(BaseEnum, self).__init__() - - def coerce(self, obj, attr, value): - value = str(value) - if value not in self._valid_values: - raise ValueError(_("Value '%(value)s' is not acceptable for " - "field '%(attr)s'.") % - {'value': value, 'attr': attr}) - return value - - def stringify(self, value): - if value is None: - return None - return '\'%s\'' % value - - -class AdjustmentType(BaseEnum): - - ALL = consts.ADJUSTMENT_TYPES - - def get_schema(self): - return {'type': ['string'], - 'enum': self._valid_values} - - -class ClusterActionName(BaseEnum): - - ALL = consts.CLUSTER_ACTION_NAMES - - def get_schema(self): - return {'type': ['string'], - 'enum': self._valid_values} - - -class ClusterStatus(BaseEnum): - - ALL = consts.CLUSTER_STATUSES - - -class NodeStatus(BaseEnum): - - ALL = consts.NODE_STATUSES - - -class ActionStatus(BaseEnum): - - ALL = consts.ACTION_STATUSES - - -class ReceiverType(BaseEnum): - - ALL = consts.RECEIVER_TYPES - - def get_schema(self): - return {'type': ['string'], - 'enum': self._valid_values} - - -class UniqueDict(fields.Dict): - - def coerce(self, obj, attr, value): - res = super(UniqueDict, self).coerce(obj, attr, value) - new_nodes = res.values() - if len(new_nodes) != len(set(new_nodes)): - raise ValueError(_("Map contains duplicated values")) - return res - - -# TODO(Qiming): remove this when oslo patch is released -# https://review.openstack.org/#/c/360095 -class NonNegativeIntegerField(fields.AutoTypedField): - - AUTO_TYPE = NonNegativeInteger() - - -class BooleanField(fields.AutoTypedField): - - AUTO_TYPE = Boolean() - - -# An override to the oslo.versionedobjects version so that we are using -# our own Object definition.
-class ObjectField(fields.AutoTypedField): - - def __init__(self, objtype, subclasses=False, **kwargs): - self.AUTO_TYPE = Object(objtype, subclasses) - self.objname = objtype - super(ObjectField, self).__init__(**kwargs) - - -class JsonField(fields.AutoTypedField): - AUTO_TYPE = Json() - - -class ListField(fields.AutoTypedField): - AUTO_TYPE = fields.List(fields.FieldType()) - - -class NotificationPriorityField(fields.BaseEnumField): - AUTO_TYPE = NotificationPriority() - - -class NotificationPhaseField(fields.BaseEnumField): - AUTO_TYPE = NotificationPhase() - - -class NameField(fields.AutoTypedField): - - AUTO_TYPE = Name() - - -class UUIDField(fields.AutoTypedField): - - AUTO_TYPE = UUID() - - -class CapacityField(fields.AutoTypedField): - - AUTO_TYPE = None - - def __init__(self, nullable=False, default=None, minimum=0, maximum=None): - self.AUTO_TYPE = Capacity(minimum=minimum, maximum=maximum) - super(CapacityField, self).__init__(nullable=nullable, default=default) - - -class SortField(fields.AutoTypedField): - - AUTO_TYPE = None - - def __init__(self, valid_keys, nullable=False, default=None): - self.AUTO_TYPE = Sort(valid_keys) - super(SortField, self).__init__(nullable=nullable, default=default) - - -class IdentityListField(fields.AutoTypedField): - - AUTO_TYPE = None - - def __init__(self, min_items=0, unique=True, nullable=False, default=None): - if default is None: - default = [] - self.AUTO_TYPE = IdentityList(fields.String(), min_items=min_items, - unique=unique) - super(IdentityListField, self).__init__(nullable=nullable, - default=default) - - -class AdjustmentTypeField(fields.AutoTypedField): - - AUTO_TYPE = None - - def __init__(self, **kwargs): - nullable = kwargs.get('nullable', False) - self.AUTO_TYPE = AdjustmentType(nullable=nullable) - super(AdjustmentTypeField, self).__init__(**kwargs) - - -class ClusterActionNameField(fields.AutoTypedField): - - AUTO_TYPE = None - - def __init__(self, **kwargs): - nullable = kwargs.get('nullable', False) - self.AUTO_TYPE = ClusterActionName(nullable=nullable) - super(ClusterActionNameField, self).__init__(**kwargs) - - -class ClusterStatusField(fields.AutoTypedField): - - AUTO_TYPE = ClusterStatus - - -class NodeStatusField(fields.AutoTypedField): - - AUTO_TYPE = NodeStatus - - -class ActionStatusField(fields.AutoTypedField): - - AUTO_TYPE = ActionStatus - - -class ReceiverTypeField(fields.AutoTypedField): - - AUTO_TYPE = None - - def __init__(self, **kwargs): - nullable = kwargs.get('nullable', False) - self.AUTO_TYPE = ReceiverType(nullable=nullable) - super(ReceiverTypeField, self).__init__(**kwargs) - - -class NodeReplaceMapField(fields.AutoTypedField): - - AUTO_TYPE = UniqueDict(fields.String()) - - -class CustomListField(ListField): - - def __init__(self, attr_name, **kwargs): - self.attr_name = attr_name - super(CustomListField, self).__init__(**kwargs) - - def coerce(self, obj, attr, value): - objs = super(CustomListField, self).coerce(obj, attr, value) - custom_list = [] - for i in objs: - custom_list.append(getattr(i, self.attr_name)) - return custom_list diff --git a/senlin/objects/health_registry.py b/senlin/objects/health_registry.py deleted file mode 100644 index d706cb7ec..000000000 --- a/senlin/objects/health_registry.py +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Health registry object.""" - -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class HealthRegistry(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin health registry object.""" - - fields = { - 'id': fields.UUIDField(), - 'cluster_id': fields.UUIDField(), - 'check_type': fields.StringField(), - 'interval': fields.IntegerField(nullable=True), - 'params': fields.JsonField(nullable=True), - 'engine_id': fields.UUIDField(), - 'enabled': fields.BooleanField(), - } - - @classmethod - def create(cls, context, cluster_id, check_type, interval, params, - engine_id, enabled=True): - obj = db_api.registry_create(context, cluster_id, check_type, - interval, - params, engine_id, - enabled=enabled) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def update(cls, context, cluster_id, values): - db_api.registry_update(context, cluster_id, values) - - @classmethod - def claim(cls, context, engine_id): - objs = db_api.registry_claim(context, engine_id) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def delete(cls, context, cluster_id): - db_api.registry_delete(context, cluster_id) - - @classmethod - def get(cls, context, cluster_id): - obj = db_api.registry_get(context, cluster_id) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_engine(cls, context, engine_id, cluster_id): - params = { - "cluster_id": cluster_id, - "engine_id": engine_id, - } - obj = db_api.registry_get_by_param(context, params) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def disable_registry(cls, context, cluster_id): - cls.update(context, cluster_id, {'enabled': False}) - - @classmethod - def enable_registry(cls, context, cluster_id): - cls.update(context, cluster_id, {"enabled": True}) diff --git a/senlin/objects/node.py b/senlin/objects/node.py deleted file mode 100644 index 512a63353..000000000 --- a/senlin/objects/node.py +++ /dev/null @@ -1,189 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Node object.""" - -from oslo_utils import uuidutils - -from senlin.common import exception -from senlin.common import utils -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Node(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin node object.""" - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'profile_id': fields.UUIDField(), - 'cluster_id': fields.StringField(), - 'physical_id': fields.StringField(nullable=True), - 'index': fields.IntegerField(), - 'role': fields.StringField(nullable=True), - 'init_at': fields.DateTimeField(), - 'created_at': fields.DateTimeField(nullable=True), - 'updated_at': fields.DateTimeField(nullable=True), - 'status': fields.StringField(), - 'status_reason': fields.StringField(nullable=True), - 'metadata': fields.JsonField(nullable=True), - 'data': fields.JsonField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'domain': fields.StringField(nullable=True), - 'dependents': fields.JsonField(nullable=True), - 'profile_name': fields.StringField(nullable=True), - 'profile_created_at': fields.StringField(nullable=True), - 'tainted': fields.BooleanField(), - } - - @staticmethod - def _from_db_object(context, obj, db_obj): - if db_obj is None: - return None - for field in obj.fields: - if field == 'metadata': - obj['metadata'] = db_obj['meta_data'] - elif field == 'profile_name': - p = db_obj['profile'] - obj['profile_name'] = p.name if p else 'Unknown' - elif field == 'profile_created_at': - p = db_obj['profile'] - obj['profile_created_at'] = p.created_at if p else None - elif field == 'tainted': - p = db_obj[field] - obj[field] = p if p else False - else: - obj[field] = db_obj[field] - - obj._context = context - obj.obj_reset_changes() - - return obj - - @classmethod - def create(cls, context, values): - values = cls._transpose_metadata(values) - obj = db_api.node_create(context, values) - # NOTE: We need an extra DB call to make sure the profile is loaded - # and bound to the node created. - obj = db_api.node_get(context, obj.id) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def find(cls, context, identity, project_safe=True): - """Find a node with the given identity. - - :param context: An instance of the request context. - :param identity: The UUID, name or short-id of a node. - :param project_safe: A boolean indicating whether only nodes from the - same project as the requesting one are qualified - to be returned. - :return: A DB object of Node. - :raises: An exception of ``ResourceNotFound`` if no matching node is - or an exception of ``MultipleChoices`` more than one node - found matching the criteria. 
- """ - node = None - if uuidutils.is_uuid_like(identity): - node = cls.get(context, identity, project_safe=project_safe) - if not node: - node = cls.get_by_name(context, identity, - project_safe=project_safe) - else: - node = cls.get_by_name(context, identity, - project_safe=project_safe) - if not node: - node = cls.get_by_short_id(context, identity, - project_safe=project_safe) - - if node is None: - raise exception.ResourceNotFound(type='node', id=identity) - - return node - - @classmethod - def get(cls, context, node_id, **kwargs): - obj = db_api.node_get(context, node_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_name(cls, context, name, **kwargs): - obj = db_api.node_get_by_name(context, name, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_short_id(cls, context, short_id, **kwargs): - obj = db_api.node_get_by_short_id(context, short_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_all(cls, context, **kwargs): - objs = db_api.node_get_all(context, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def get_all_by_cluster(cls, context, cluster_id, filters=None, - project_safe=True): - objs = db_api.node_get_all_by_cluster( - context, cluster_id, filters=filters, project_safe=project_safe) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def ids_by_cluster(cls, context, cluster_id, filters=None): - """An internal API for retrieving node ids only.""" - return db_api.node_ids_by_cluster(context, cluster_id, filters=filters) - - @classmethod - def count_by_cluster(cls, context, cluster_id, **kwargs): - return db_api.node_count_by_cluster(context, cluster_id, **kwargs) - - @classmethod - def update(cls, context, obj_id, values): - values = cls._transpose_metadata(values) - db_api.node_update(context, obj_id, values) - - @classmethod - def migrate(cls, context, obj_id, to_cluster, timestamp, role=None): - return db_api.node_migrate(context, obj_id, to_cluster, timestamp, - role=role) - - @classmethod - def delete(cls, context, obj_id): - return db_api.node_delete(context, obj_id) - - def to_dict(self): - return { - 'id': self.id, - 'name': self.name, - 'cluster_id': self.cluster_id, - 'physical_id': self.physical_id, - 'profile_id': self.profile_id, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'index': self.index, - 'role': self.role, - 'init_at': utils.isotime(self.init_at), - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at), - 'status': self.status, - 'status_reason': self.status_reason, - 'data': self.data, - 'metadata': self.metadata, - 'dependents': self.dependents, - 'profile_name': self.profile_name, - 'tainted': self.tainted, - } diff --git a/senlin/objects/node_lock.py b/senlin/objects/node_lock.py deleted file mode 100644 index 8be63b0c7..000000000 --- a/senlin/objects/node_lock.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Node lock object.""" - -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class NodeLock(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin node lock object.""" - - fields = { - 'node_id': fields.UUIDField(), - 'action_id': fields.UUIDField(), - } - - @classmethod - def acquire(cls, node_id, action_id): - return db_api.node_lock_acquire(node_id, action_id) - - @classmethod - def is_locked(cls, cluster_id): - return db_api.node_is_locked(cluster_id) - - @classmethod - def release(cls, node_id, action_id): - return db_api.node_lock_release(node_id, action_id) - - @classmethod - def steal(cls, node_id, action_id): - return db_api.node_lock_steal(node_id, action_id) diff --git a/senlin/objects/notification.py b/senlin/objects/notification.py deleted file mode 100644 index 46c19f440..000000000 --- a/senlin/objects/notification.py +++ /dev/null @@ -1,283 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect - -from senlin.common import messaging -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register_if(False) -class NotificationObject(base.SenlinObject): - """Base class for all notification related versioned objects.""" - VERSION = '1.0' - - def __init__(self, **kwargs): - # The notification objects are created on the fly so every field is - # shown as changed. We reset the object after creation to avoid - # sending such meaningless information. - super(NotificationObject, self).__init__(**kwargs) - self.obj_reset_changes(recursive=False) - - -@base.SenlinObjectRegistry.register_notification -class EventType(NotificationObject): - VERSION = '1.0' - - fields = { - 'object': fields.StringField(nullable=False), - 'action': fields.StringField(nullable=False), - 'phase': fields.NotificationPhaseField(nullable=True), - } - - def to_notification_field(self): - """Serialize the object to the wire format.""" - s = '%s.%s' % (self.object, self.action) - if self.obj_attr_is_set('phase'): - s += '.%s' % self.phase - return s - - -@base.SenlinObjectRegistry.register_notification -class NotificationPublisher(NotificationObject): - VERSION = '1.0' - - fields = { - 'host': fields.StringField(), - 'binary': fields.StringField(), - } - - @classmethod - def from_service(cls, service): - return cls(host=service.host, binary=service.binary) - - @property - def publisher_id(self): - return '%s:%s' % (self.binary, self.host) - - -@base.SenlinObjectRegistry.register_if(False) -class NotificationBase(NotificationObject): - """Base class for versioned notifications. - - Every subclass shall define a 'payload' field. 
- """ - VERSION = '1.0' - - fields = { - 'priority': fields.NotificationPriorityField(), - 'event_type': fields.ObjectField('EventType'), - 'publisher': fields.ObjectField('NotificationPublisher'), - } - - def _emit(self, context, event_type, publisher_id, payload): - notifier = messaging.get_notifier(publisher_id) - notify = getattr(notifier, self.priority) - notify(context, event_type, payload) - - def emit(self, context): - """Send the notification.""" - self.payload.obj_reset_changes(recursive=False) - self._emit(context, - self.event_type.to_notification_field(), - self.publisher.publisher_id, - self.payload.obj_to_primitive()) - - -@base.SenlinObjectRegistry.register_notification -class ExceptionPayload(NotificationObject): - - VERSION = '1.0' - - fields = { - 'module': fields.StringField(), - 'function': fields.StringField(), - 'exception': fields.StringField(), - 'message': fields.StringField(), - } - - @classmethod - def from_exception(cls, exc): - if exc is None: - return None - trace = inspect.trace()[-1] - module = inspect.getmodule(trace[0]) - module_name = module.__name__ if module else 'unknown' - return cls(function=trace[3], module=module_name, - exception=exc.__class__.__name__, - message=str(exc)) - - -@base.SenlinObjectRegistry.register_notification -class ClusterPayload(NotificationObject): - - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'profile_id': fields.UUIDField(), - 'init_at': fields.DateTimeField(), - 'created_at': fields.DateTimeField(nullable=True), - 'updated_at': fields.DateTimeField(nullable=True), - 'min_size': fields.IntegerField(), - 'max_size': fields.IntegerField(), - 'desired_capacity': fields.IntegerField(), - 'timeout': fields.IntegerField(), - 'status': fields.StringField(), - 'status_reason': fields.StringField(), - 'metadata': fields.JsonField(nullable=True), - 'data': fields.JsonField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'domain': fields.StringField(nullable=True), - 'dependents': fields.JsonField(nullable=True), - } - - @classmethod - def from_cluster(cls, cluster): - values = {} - for field in cls.fields: - values[field] = getattr(cluster, field) - obj = cls(**values) - obj.obj_reset_changes(recursive=False) - return obj - - -@base.SenlinObjectRegistry.register_notification -class NodePayload(NotificationObject): - - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'profile_id': fields.UUIDField(), - 'cluster_id': fields.StringField(), - 'physical_id': fields.StringField(nullable=True), - 'index': fields.IntegerField(), - 'role': fields.StringField(nullable=True), - 'init_at': fields.DateTimeField(), - 'created_at': fields.DateTimeField(nullable=True), - 'updated_at': fields.DateTimeField(nullable=True), - 'status': fields.StringField(), - 'status_reason': fields.StringField(), - 'metadata': fields.JsonField(nullable=True), - 'data': fields.JsonField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'domain': fields.StringField(nullable=True), - 'dependents': fields.JsonField(nullable=True), - } - - @classmethod - def from_node(cls, node): - values = {} - for field in cls.fields: - values[field] = getattr(node, field) - obj = cls(**values) - obj.obj_reset_changes(recursive=False) - return obj - - -@base.SenlinObjectRegistry.register_notification -class ActionPayload(NotificationObject): - - VERSION = '1.0' - - fields = { - 'id': fields.UUIDField(), - 'name': 
fields.StringField(), - 'created_at': fields.DateTimeField(nullable=True), - 'target': fields.UUIDField(), - 'action': fields.StringField(), - 'start_time': fields.FloatField(), - 'end_time': fields.FloatField(nullable=True), - 'timeout': fields.IntegerField(nullable=True), - 'status': fields.StringField(), - 'status_reason': fields.StringField(), - 'inputs': fields.JsonField(nullable=True), - 'outputs': fields.JsonField(nullable=True), - 'data': fields.JsonField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - } - - @classmethod - def from_action(cls, action): - values = {} - for field in cls.fields: - values[field] = getattr(action, field) - obj = cls(**values) - obj.obj_reset_changes(recursive=False) - return obj - - -@base.SenlinObjectRegistry.register_notification -class ClusterActionPayload(NotificationObject): - - VERSION = '1.0' - - fields = { - 'cluster': fields.ObjectField('ClusterPayload'), - 'action': fields.ObjectField('ActionPayload'), - 'exception': fields.ObjectField('ExceptionPayload', nullable=True), - } - - def __init__(self, cluster, action, **kwargs): - ex = kwargs.pop('exception', None) - super(ClusterActionPayload, self).__init__( - cluster=ClusterPayload.from_cluster(cluster), - action=ActionPayload.from_action(action), - exception=ex, - **kwargs) - - -@base.SenlinObjectRegistry.register_notification -class NodeActionPayload(NotificationObject): - - VERSION = '1.0' - - fields = { - 'node': fields.ObjectField('NodePayload'), - 'action': fields.ObjectField('ActionPayload'), - 'exception': fields.ObjectField('ExceptionPayload', nullable=True), - } - - def __init__(self, node, action, **kwargs): - ex = kwargs.pop('exception', None) - super(NodeActionPayload, self).__init__( - node=NodePayload.from_node(node), - action=ActionPayload.from_action(action), - exception=ex, - **kwargs) - - -@base.SenlinObjectRegistry.register_notification -class ClusterActionNotification(NotificationBase): - - VERSION = '1.0' - - fields = { - 'payload': fields.ObjectField('ClusterActionPayload') - } - - -@base.SenlinObjectRegistry.register_notification -class NodeActionNotification(NotificationBase): - - VERSION = '1.0' - - fields = { - 'payload': fields.ObjectField('NodeActionPayload') - } diff --git a/senlin/objects/policy.py b/senlin/objects/policy.py deleted file mode 100644 index de53ac1f2..000000000 --- a/senlin/objects/policy.py +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
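A note on the pattern that recurs throughout the object modules below: every find() resolves an identity by trying it as a UUID first, then as a name, then as a short-id. A standalone sketch of that lookup order, assuming hypothetical getter callables in place of the senlin db_api helpers:

from oslo_utils import uuidutils

def find_resource(identity, get_by_id, get_by_name, get_by_short_id):
    # Try the identity as a UUID first; otherwise treat it as a name and
    # finally fall back to a short-id prefix lookup.
    if uuidutils.is_uuid_like(identity):
        found = get_by_id(identity) or get_by_name(identity)
    else:
        found = get_by_name(identity) or get_by_short_id(identity)
    if found is None:
        raise LookupError("No resource found matching '%s'." % identity)
    return found
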
- -"""Policy object.""" - -from oslo_utils import uuidutils - -from senlin.common import exception -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Policy(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin policy object.""" - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'type': fields.StringField(), - 'spec': fields.JsonField(), - 'cooldown': fields.IntegerField(nullable=True), - 'level': fields.IntegerField(nullable=True), - 'data': fields.JsonField(nullable=True), - 'created_at': fields.DateTimeField(), - 'updated_at': fields.DateTimeField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'domain': fields.StringField(nullable=True), - } - - @classmethod - def create(cls, context, values): - values = cls._transpose_metadata(values) - obj = db_api.policy_create(context, values) - return cls._from_db_object(context, cls(context), obj) - - @classmethod - def find(cls, context, identity, **kwargs): - """Find a policy with the given identity. - - :param context: An instance of the request context. - :param identity: The UUID, name or short-id of a profile. - :param project_safe: A boolean indicating whether policies from - projects other than the requesting one should be - evaluated. - :return: A DB object of policy or an exception of `ResourceNotFound` - if no matching object is found. - """ - if uuidutils.is_uuid_like(identity): - policy = cls.get(context, identity, **kwargs) - if not policy: - policy = cls.get_by_name(context, identity, **kwargs) - else: - policy = cls.get_by_name(context, identity, **kwargs) - if not policy: - policy = cls.get_by_short_id(context, identity, **kwargs) - - if not policy: - raise exception.ResourceNotFound(type='policy', id=identity) - - return policy - - @classmethod - def get(cls, context, policy_id, **kwargs): - obj = db_api.policy_get(context, policy_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_name(cls, context, name, **kwargs): - obj = db_api.policy_get_by_name(context, name, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_short_id(cls, context, short_id, **kwargs): - obj = db_api.policy_get_by_short_id(context, short_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_all(cls, context, **kwargs): - objs = db_api.policy_get_all(context, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def update(cls, context, obj_id, values): - values = cls._transpose_metadata(values) - obj = db_api.policy_update(context, obj_id, values) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def delete(cls, context, obj_id): - db_api.policy_delete(context, obj_id) - - def to_dict(self): - policy_dict = { - 'id': self.id, - 'name': self.name, - 'type': self.type, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'spec': self.spec, - 'created_at': self.created_at, - 'updated_at': self.updated_at, - 'data': self.data - } - return policy_dict diff --git a/senlin/objects/profile.py b/senlin/objects/profile.py deleted file mode 100644 index 1c3836841..000000000 --- a/senlin/objects/profile.py +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Profile object.""" -from oslo_utils import uuidutils - -from senlin.common import exception -from senlin.common import utils -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Profile(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin profile object.""" - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'type': fields.StringField(), - 'context': fields.JsonField(), - 'spec': fields.JsonField(), - 'created_at': fields.DateTimeField(), - 'updated_at': fields.DateTimeField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'domain': fields.StringField(nullable=True), - 'permission': fields.StringField(nullable=True), - 'metadata': fields.JsonField(nullable=True), - } - - @classmethod - def create(cls, context, values): - values = cls._transpose_metadata(values) - obj = db_api.profile_create(context, values) - return cls._from_db_object(context, cls(context), obj) - - @classmethod - def find(cls, context, identity, **kwargs): - """Find a profile with the given identity. - - :param context: An instance of the request context. - :param identity: The UUID, name or short-id of a profile. - :param project_safe: A boolean indicating whether profiles from - projects other than the requesting one can be - returned. - :return: A DB object of profile or an exception `ResourceNotFound` - if no matching object is found.
- """ - if uuidutils.is_uuid_like(identity): - profile = cls.get(context, identity, **kwargs) - if not profile: - profile = cls.get_by_name(context, identity, **kwargs) - else: - profile = cls.get_by_name(context, identity, **kwargs) - if not profile: - profile = cls.get_by_short_id(context, identity, **kwargs) - - if not profile: - raise exception.ResourceNotFound(type='profile', id=identity) - - return profile - - @classmethod - def get(cls, context, profile_id, **kwargs): - obj = db_api.profile_get(context, profile_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_name(cls, context, name, **kwargs): - obj = db_api.profile_get_by_name(context, name, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_short_id(cls, context, short_id, **kwargs): - obj = db_api.profile_get_by_short_id(context, short_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_all(cls, context, **kwargs): - objs = db_api.profile_get_all(context, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def update(cls, context, obj_id, values): - values = cls._transpose_metadata(values) - obj = db_api.profile_update(context, obj_id, values) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def delete(cls, context, obj_id): - db_api.profile_delete(context, obj_id) - - def to_dict(self): - profile_dict = { - 'id': self.id, - 'name': self.name, - 'type': self.type, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'spec': self.spec, - 'metadata': self.metadata, - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at) - } - return profile_dict diff --git a/senlin/objects/receiver.py b/senlin/objects/receiver.py deleted file mode 100644 index 89a450059..000000000 --- a/senlin/objects/receiver.py +++ /dev/null @@ -1,121 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Receiver object.""" - -from oslo_utils import uuidutils - -from senlin.common import exception -from senlin.common import utils -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Receiver(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin receiver object.""" - - fields = { - 'id': fields.UUIDField(), - 'name': fields.StringField(), - 'type': fields.StringField(), - 'cluster_id': fields.StringField(nullable=True), - 'actor': fields.JsonField(nullable=True), - 'action': fields.StringField(nullable=True), - 'params': fields.JsonField(nullable=True), - 'channel': fields.JsonField(nullable=True), - 'created_at': fields.DateTimeField(nullable=True), - 'updated_at': fields.DateTimeField(nullable=True), - 'user': fields.StringField(), - 'project': fields.StringField(), - 'domain': fields.StringField(nullable=True), - } - - @classmethod - def create(cls, context, values): - obj = db_api.receiver_create(context, values) - return cls._from_db_object(context, cls(context), obj) - - @classmethod - def find(cls, context, identity, **kwargs): - """Find a receiver with the given identity. - - :param context: An instance of the request context. - :param identity: The UUID, name or short-id of a receiver. - :param project_safe: A boolean indicating whether receiver from other - projects other than the requesting one can be - returned. - :return: A DB object of receiver or an exception `ResourceNotFound` - if no matching receiver is found. - """ - if uuidutils.is_uuid_like(identity): - receiver = cls.get(context, identity, **kwargs) - if not receiver: - receiver = cls.get_by_name(context, identity, **kwargs) - else: - receiver = cls.get_by_name(context, identity, **kwargs) - if not receiver: - receiver = cls.get_by_short_id(context, identity, **kwargs) - - if not receiver: - raise exception.ResourceNotFound(type='receiver', id=identity) - - return receiver - - @classmethod - def get(cls, context, receiver_id, **kwargs): - obj = db_api.receiver_get(context, receiver_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_name(cls, context, name, **kwargs): - obj = db_api.receiver_get_by_name(context, name, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_by_short_id(cls, context, short_id, **kwargs): - obj = db_api.receiver_get_by_short_id(context, short_id, **kwargs) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_all(cls, context, **kwargs): - objs = db_api.receiver_get_all(context, **kwargs) - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def update(cls, context, receiver_id, values): - values = cls._transpose_metadata(values) - obj = db_api.receiver_update(context, receiver_id, values) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def delete(cls, context, receiver_id): - db_api.receiver_delete(context, receiver_id) - - def to_dict(self): - receiver_dict = { - 'id': self.id, - 'name': self.name, - 'type': self.type, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at), - 'cluster_id': self.cluster_id, - 'actor': self.actor, - 'action': self.action, - 'params': self.params, - 'channel': self.channel, - } - return receiver_dict diff --git a/senlin/objects/requests/__init__.py b/senlin/objects/requests/__init__.py 
deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/objects/requests/actions.py b/senlin/objects/requests/actions.py deleted file mode 100644 index a7ae65b9d..000000000 --- a/senlin/objects/requests/actions.py +++ /dev/null @@ -1,96 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import versionutils - -from senlin.common import consts -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class ActionCreateRequestBody(base.SenlinObject): - - fields = { - 'name': fields.NameField(), - 'cluster_id': fields.StringField(), - 'action': fields.StringField(), - 'inputs': fields.JsonField(nullable=True, default={}), - } - - -@base.SenlinObjectRegistry.register -class ActionCreateRequest(base.SenlinObject): - - fields = { - 'action': fields.ObjectField('ActionCreateRequestBody') - } - - -@base.SenlinObjectRegistry.register -class ActionListRequest(base.SenlinObject): - action_name_list = list(consts.CLUSTER_ACTION_NAMES) - action_name_list.extend(list(consts.NODE_ACTION_NAMES)) - - VERSION = '1.1' - VERSION_MAP = { - '1.14': '1.1' - } - - fields = { - 'name': fields.ListOfStringsField(nullable=True), - 'cluster_id': fields.ListOfStringsField(nullable=True), - 'action': fields.ListOfEnumField( - valid_values=action_name_list, nullable=True), - 'target': fields.ListOfStringsField(nullable=True), - 'status': fields.ListOfEnumField( - valid_values=list(consts.ACTION_STATUSES), nullable=True), - 'limit': fields.NonNegativeIntegerField(nullable=True), - 'marker': fields.UUIDField(nullable=True), - 'sort': fields.SortField( - valid_keys=list(consts.ACTION_SORT_KEYS), nullable=True), - 'project_safe': fields.FlexibleBooleanField(default=True) - } - - def obj_make_compatible(self, primitive, target_version): - super(ActionListRequest, self).obj_make_compatible( - primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 14): - if 'cluster_id' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['cluster_id'] - - -@base.SenlinObjectRegistry.register -class ActionGetRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - } - - -@base.SenlinObjectRegistry.register -class ActionDeleteRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class ActionUpdateRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'status': fields.StringField(), - 'force': fields.BooleanField(default=False) - } diff --git a/senlin/objects/requests/build_info.py b/senlin/objects/requests/build_info.py deleted file mode 100644 index a7b8781e9..000000000 --- a/senlin/objects/requests/build_info.py +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.objects import base - - -@base.SenlinObjectRegistry.register -class GetRevisionRequest(base.SenlinObject): - - fields = {} diff --git a/senlin/objects/requests/cluster_policies.py b/senlin/objects/requests/cluster_policies.py deleted file mode 100644 index 137ae9e01..000000000 --- a/senlin/objects/requests/cluster_policies.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.common import consts -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class ClusterPolicyListRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'policy_name': fields.NameField(nullable=True), - 'policy_type': fields.StringField(nullable=True), - 'enabled': fields.BooleanField(nullable=True), - 'sort': fields.SortField( - valid_keys=list(consts.CLUSTER_POLICY_SORT_KEYS), nullable=True) - } - - -@base.SenlinObjectRegistry.register -class ClusterPolicyGetRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'policy_id': fields.StringField(), - } diff --git a/senlin/objects/requests/clusters.py b/senlin/objects/requests/clusters.py deleted file mode 100644 index 4ef30ce12..000000000 --- a/senlin/objects/requests/clusters.py +++ /dev/null @@ -1,297 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
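One detail worth noting in the file below: field defaults such as CONF.default_action_timeout are read while the class body executes, so the module has to call CONF.import_opt() before any request class is defined. A minimal sketch of the same ordering constraint with plain oslo.config; the option name demo_timeout and its default are illustrative, not taken from senlin.conf:

from oslo_config import cfg

CONF = cfg.CONF
# The option must be registered (or imported from the module that registers
# it) before it is dereferenced; otherwise cfg raises NoSuchOptError.
CONF.register_opts([cfg.IntOpt('demo_timeout', default=3600)])

class DemoRequest(object):
    # Evaluated once, at class-definition time, just like the field
    # defaults in the request body classes below.
    DEFAULT_TIMEOUT = CONF.demo_timeout
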
- -from oslo_config import cfg -from oslo_utils import versionutils - -from senlin.common import consts -from senlin.objects import base -from senlin.objects import fields - -CONF = cfg.CONF -CONF.import_opt('default_action_timeout', 'senlin.conf') - - -@base.SenlinObjectRegistry.register -class ClusterListRequest(base.SenlinObject): - - fields = { - 'name': fields.ListOfStringsField(nullable=True), - 'status': fields.ListOfEnumField( - valid_values=list(consts.CLUSTER_STATUSES), nullable=True), - 'limit': fields.NonNegativeIntegerField(nullable=True), - 'marker': fields.UUIDField(nullable=True), - 'sort': fields.SortField( - valid_keys=list(consts.CLUSTER_SORT_KEYS), nullable=True), - 'project_safe': fields.FlexibleBooleanField(default=True), - } - - -@base.SenlinObjectRegistry.register -class ClusterCreateRequestBody(base.SenlinObject): - - # VERSION 1.0: initial version - # VERSION 1.1: added field 'config' - VERSION = '1.1' - VERSION_MAP = { - '1.7': '1.1', - } - - fields = { - 'name': fields.NameField(), - 'profile_id': fields.StringField(), - 'min_size': fields.CapacityField( - nullable=True, minimum=0, - default=consts.CLUSTER_DEFAULT_MIN_SIZE), - 'max_size': fields.CapacityField( - nullable=True, minimum=-1, - default=consts.CLUSTER_DEFAULT_MAX_SIZE), - 'desired_capacity': fields.CapacityField( - nullable=True, minimum=0), - 'metadata': fields.JsonField(nullable=True, default={}), - 'timeout': fields.NonNegativeIntegerField( - nullable=True, default=CONF.default_action_timeout), - 'config': fields.JsonField(nullable=True, default={}), - } - - def obj_make_compatible(self, primitive, target_version): - super(ClusterCreateRequestBody, self).obj_make_compatible( - primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - if 'config' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['config'] - - -@base.SenlinObjectRegistry.register -class ClusterCreateRequest(base.SenlinObject): - - fields = { - 'cluster': fields.ObjectField('ClusterCreateRequestBody') - } - - -@base.SenlinObjectRegistry.register -class ClusterGetRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class ClusterUpdateRequest(base.SenlinObject): - - # VERSION 1.0: initial version - # VERSION 1.1: added field 'profile_only' - # VERSION 1.2: added field 'config' - VERSION = '1.2' - VERSION_MAP = { - '1.6': '1.1', - '1.7': '1.2', - } - - fields = { - 'identity': fields.StringField(), - 'name': fields.NameField(nullable=True), - 'profile_id': fields.StringField(nullable=True), - 'metadata': fields.JsonField(nullable=True), - 'timeout': fields.NonNegativeIntegerField(nullable=True), - 'profile_only': fields.BooleanField(nullable=True), - 'config': fields.JsonField(nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - super(ClusterUpdateRequest, self).obj_make_compatible( - primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - if 'profile_only' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['profile_only'] - if target_version < (1, 2): - if 'config' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['config'] - - -@base.SenlinObjectRegistry.register -class ClusterAddNodesRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'nodes': fields.IdentityListField(min_items=1) - } - -
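The obj_make_compatible() hooks in these request objects downgrade a serialized request by deleting fields that older API microversions do not know about. The classes above index into primitive['senlin_object.data'], which suggests the Senlin base class hands the hook the fully wrapped primitive; stock oslo.versionedobjects passes only the inner data dict, which is what the following self-contained sketch assumes. The class DemoRequest and its fields are invented for illustration:

from oslo_utils import versionutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields as ovo_fields


@ovo_base.VersionedObjectRegistry.register
class DemoRequest(ovo_base.VersionedObject):
    # VERSION 1.0: initial version
    # VERSION 1.1: added field 'config'
    VERSION = '1.1'

    fields = {
        'name': ovo_fields.StringField(),
        'config': ovo_fields.DictOfStringsField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        super(DemoRequest, self).obj_make_compatible(primitive, target_version)
        target = versionutils.convert_version_to_tuple(target_version)
        if target < (1, 1):
            # 'config' did not exist before 1.1; drop it when downgrading.
            primitive.pop('config', None)


# Serializing for a 1.0 consumer strips the 1.1-only field.
req = DemoRequest(name='c1', config={'key': 'value'})
old = req.obj_to_primitive(target_version='1.0')
assert 'config' not in old['versioned_object.data']
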
-@base.SenlinObjectRegistry.register -class ClusterDelNodesRequest(base.SenlinObject): - - # VERSION 1.0: Initial version - # VERSION 1.1: Add field 'destroy_after_deletion' - VERSION = '1.1' - VERSION_MAP = { - '1.4': '1.1', - } - - fields = { - 'identity': fields.StringField(), - 'nodes': fields.IdentityListField(min_items=1), - 'destroy_after_deletion': fields.BooleanField(nullable=True, - default=False) - } - - def obj_make_compatible(self, primitive, target_version): - super(ClusterDelNodesRequest, self).obj_make_compatible( - primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - if 'destroy_after_deletion' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['destroy_after_deletion'] - - -@base.SenlinObjectRegistry.register -class ClusterReplaceNodesRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'nodes': fields.NodeReplaceMapField(), - } - - -@base.SenlinObjectRegistry.register -class ClusterResizeRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'adjustment_type': fields.AdjustmentTypeField(nullable=True), - 'number': fields.FloatField(nullable=True), - 'min_size': fields.CapacityField(nullable=True, minimum=0), - 'max_size': fields.CapacityField(nullable=True, minimum=-1), - 'min_step': fields.NonNegativeIntegerField(nullable=True), - 'strict': fields.BooleanField(nullable=True, default=True), - } - - -@base.SenlinObjectRegistry.register -class ClusterScaleInRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'count': fields.NonNegativeIntegerField(nullable=True), - } - - -@base.SenlinObjectRegistry.register -class ClusterScaleOutRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'count': fields.NonNegativeIntegerField(nullable=True), - } - - -@base.SenlinObjectRegistry.register -class ClusterAttachPolicyRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'policy_id': fields.StringField(), - 'enabled': fields.BooleanField(nullable=True, default=True), - } - - -@base.SenlinObjectRegistry.register -class ClusterUpdatePolicyRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'policy_id': fields.StringField(), - 'enabled': fields.BooleanField(nullable=True, default=True), - } - - -@base.SenlinObjectRegistry.register -class ClusterDetachPolicyRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'policy_id': fields.StringField(), - } - - -@base.SenlinObjectRegistry.register -class ClusterCheckRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'params': fields.JsonField(nullable=True), - } - - -@base.SenlinObjectRegistry.register -class ClusterRecoverRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'params': fields.JsonField(nullable=True), - } - - -@base.SenlinObjectRegistry.register -class ClusterCollectRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'path': fields.StringField(), - } - - -@base.SenlinObjectRegistry.register -class ClusterOperationRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'operation': fields.StringField(), - 'filters': fields.JsonField(nullable=True, default={}), - 'params': fields.JsonField(nullable=True, default={}), - } - - -@base.SenlinObjectRegistry.register -class ClusterDeleteRequest(base.SenlinObject): - # VERSION 1.0: Initial 
version - # VERSION 1.1 Added field 'force' - VERSION = '1.1' - VERSION_MAP = { - '1.8': '1.1', - } - - fields = { - 'identity': fields.StringField(), - 'force': fields.BooleanField(default=False) - } - - def obj_make_compatible(self, primitive, target_version): - super(ClusterDeleteRequest, self).obj_make_compatible( - primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - if 'force' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['force'] - - -@base.SenlinObjectRegistry.register -class ClusterCompleteLifecycleRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'lifecycle_action_token': fields.StringField(), - } diff --git a/senlin/objects/requests/credentials.py b/senlin/objects/requests/credentials.py deleted file mode 100644 index b7ab5ab5c..000000000 --- a/senlin/objects/requests/credentials.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class CredentialCreateRequest(base.SenlinObject): - fields = { - 'cred': fields.JsonField(), - 'attrs': fields.JsonField(nullable=True, default={}) - } - - -@base.SenlinObjectRegistry.register -class CredentialGetRequest(base.SenlinObject): - fields = { - 'user': fields.StringField(), - 'project': fields.StringField(), - 'query': fields.JsonField(nullable=True, default={}) - } - - -@base.SenlinObjectRegistry.register -class CredentialUpdateRequest(base.SenlinObject): - fields = { - 'cred': fields.JsonField(), - 'attrs': fields.JsonField(nullable=True, default={}) - } diff --git a/senlin/objects/requests/events.py b/senlin/objects/requests/events.py deleted file mode 100644 index 556332112..000000000 --- a/senlin/objects/requests/events.py +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
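The VERSION_MAP dictionaries above (for example {'1.8': '1.1'} on ClusterDeleteRequest) tie API microversions to object versions: a client pinned below API 1.8 gets object version 1.0 and therefore no 'force' field. A hedged sketch of how such a map can be resolved; find_version() is an illustrative helper, not Senlin's API:

    def find_version(version_map, api_version, base='1.0'):
        """Pick the newest object version allowed at an API microversion."""
        def as_tuple(v):
            return tuple(int(p) for p in v.split('.'))

        best = base
        for api_v, obj_v in version_map.items():
            if as_tuple(api_v) <= as_tuple(api_version):
                if as_tuple(obj_v) > as_tuple(best):
                    best = obj_v
        return best

    assert find_version({'1.8': '1.1'}, '1.7') == '1.0'   # no 'force' yet
    assert find_version({'1.8': '1.1'}, '1.12') == '1.1'  # 'force' available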
- -from senlin.common import consts -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class EventListRequest(base.SenlinObject): - - action_name_list = list(consts.CLUSTER_ACTION_NAMES) - action_name_list.extend(list(consts.NODE_ACTION_NAMES)) - - fields = { - 'oid': fields.ListOfStringsField(nullable=True), - 'oname': fields.ListOfStringsField(nullable=True), - 'otype': fields.ListOfStringsField(nullable=True), - 'action': fields.ListOfEnumField( - valid_values=action_name_list, nullable=True), - 'cluster_id': fields.ListOfStringsField(nullable=True), - 'level': fields.ListOfEnumField( - valid_values=list(consts.EVENT_LEVELS.keys()), nullable=True), - 'limit': fields.NonNegativeIntegerField(nullable=True), - 'marker': fields.UUIDField(nullable=True), - 'sort': fields.SortField( - valid_keys=list(consts.EVENT_SORT_KEYS), nullable=True), - 'project_safe': fields.FlexibleBooleanField(default=True) - } - - -@base.SenlinObjectRegistry.register -class EventGetRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - } diff --git a/senlin/objects/requests/nodes.py b/senlin/objects/requests/nodes.py deleted file mode 100644 index 5b0adc844..000000000 --- a/senlin/objects/requests/nodes.py +++ /dev/null @@ -1,165 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
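EventListRequest above constrains its 'sort' field to consts.EVENT_SORT_KEYS via a SortField, the same convention the other list requests use. Roughly, the validation this implies looks like the following (an illustrative parser, not the actual field implementation):

    def validate_sort(value, valid_keys):
        """Accept comma-separated 'key' or 'key:dir' entries."""
        for item in value.split(','):
            key, _sep, direction = item.partition(':')
            if key not in valid_keys:
                raise ValueError('unsupported sort key: %s' % key)
            if direction and direction not in ('asc', 'desc'):
                raise ValueError('unsupported sort dir: %s' % direction)
        return value

    print(validate_sort('timestamp:desc,level', ['timestamp', 'level', 'oname']))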
- -from oslo_utils import versionutils - -from senlin.common import consts -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class NodeCreateRequestBody(base.SenlinObject): - - fields = { - 'cluster_id': fields.StringField(nullable=True, default=''), - 'metadata': fields.JsonField(nullable=True, default={}), - 'name': fields.NameField(), - 'profile_id': fields.StringField(), - 'role': fields.StringField(nullable=True, default='') - } - - -@base.SenlinObjectRegistry.register -class NodeCreateRequest(base.SenlinObject): - - fields = { - 'node': fields.ObjectField('NodeCreateRequestBody') - } - - -@base.SenlinObjectRegistry.register -class NodeListRequest(base.SenlinObject): - - fields = { - 'cluster_id': fields.StringField(nullable=True), - 'name': fields.ListOfStringsField(nullable=True), - 'status': fields.ListOfEnumField( - valid_values=list(consts.NODE_STATUSES), nullable=True), - 'limit': fields.NonNegativeIntegerField(nullable=True), - 'marker': fields.UUIDField(nullable=True), - 'sort': fields.SortField( - valid_keys=list(consts.NODE_SORT_KEYS), nullable=True), - 'project_safe': fields.FlexibleBooleanField(default=True) - } - - -@base.SenlinObjectRegistry.register -class NodeGetRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'show_details': fields.FlexibleBooleanField(nullable=True, - default=False) - } - - -@base.SenlinObjectRegistry.register -class NodeUpdateRequest(base.SenlinObject): - - # VERSION 1.0: Initial version - # VERSION 1.1: Added field 'tainted' - VERSION = '1.1' - VERSION_MAP = { - '1.13': '1.1' - } - - fields = { - 'identity': fields.StringField(), - 'metadata': fields.JsonField(nullable=True), - 'name': fields.NameField(nullable=True), - 'profile_id': fields.StringField(nullable=True), - 'role': fields.StringField(nullable=True), - 'tainted': fields.BooleanField(nullable=True) - } - - def obj_make_compatible(self, primitive, target_version): - super(NodeUpdateRequest, self).obj_make_compatible( - primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - if 'tainted' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['tainted'] - - -@base.SenlinObjectRegistry.register -class NodeDeleteRequest(base.SenlinObject): - # VERSION 1.0: Initial version - # VERSION 1.1: Added field 'force' - VERSION = '1.1' - VERSION_MAP = { - '1.8': '1.1', - } - - fields = { - 'identity': fields.StringField(), - 'force': fields.BooleanField(default=False) - } - - def obj_make_compatible(self, primitive, target_version): - super(NodeDeleteRequest, self).obj_make_compatible( - primitive, target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 1): - if 'force' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['force'] - - -@base.SenlinObjectRegistry.register -class NodeCheckRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'params': fields.JsonField(nullable=True) - } - - -@base.SenlinObjectRegistry.register -class NodeRecoverRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'params': fields.JsonField(nullable=True) - } - - -@base.SenlinObjectRegistry.register -class NodeOperationRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'operation': fields.StringField(), - 'params': fields.JsonField(nullable=True) - } - - -@base.SenlinObjectRegistry.register -class NodeAdoptRequest(base.SenlinObject): - - fields = 
{ - 'identity': fields.StringField(), - 'type': fields.StringField(), - 'name': fields.NameField(nullable=True), - 'role': fields.StringField(nullable=True), - 'metadata': fields.JsonField(nullable=True, default={}), - 'overrides': fields.JsonField(nullable=True), - 'snapshot': fields.BooleanField(nullable=True, default=False) - } - - -@base.SenlinObjectRegistry.register -class NodeAdoptPreviewRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'type': fields.StringField(), - 'overrides': fields.JsonField(nullable=True), - 'snapshot': fields.BooleanField(nullable=True, default=False) - } diff --git a/senlin/objects/requests/policies.py b/senlin/objects/requests/policies.py deleted file mode 100644 index 47f569b9c..000000000 --- a/senlin/objects/requests/policies.py +++ /dev/null @@ -1,95 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.common import consts -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class PolicyListRequest(base.SenlinObject): - - fields = { - 'name': fields.ListOfStringsField(nullable=True), - 'type': fields.ListOfStringsField(nullable=True), - 'limit': fields.NonNegativeIntegerField(nullable=True), - 'marker': fields.UUIDField(nullable=True), - 'sort': fields.SortField( - valid_keys=list(consts.POLICY_SORT_KEYS), nullable=True), - 'project_safe': fields.FlexibleBooleanField(default=True), - } - - -@base.SenlinObjectRegistry.register -class PolicyCreateRequestBody(base.SenlinObject): - - fields = { - 'name': fields.NameField(), - 'spec': fields.JsonField(), - } - - -@base.SenlinObjectRegistry.register -class PolicyCreateRequest(base.SenlinObject): - - fields = { - 'policy': fields.ObjectField('PolicyCreateRequestBody') - } - - -@base.SenlinObjectRegistry.register -class PolicyGetRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class PolicyUpdateRequestBody(base.SenlinObject): - - fields = { - 'name': fields.NameField() - } - - -@base.SenlinObjectRegistry.register -class PolicyUpdateRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'policy': fields.ObjectField('PolicyUpdateRequestBody'), - } - - -@base.SenlinObjectRegistry.register -class PolicyDeleteRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class PolicyValidateRequestBody(base.SenlinObject): - - fields = { - 'spec': fields.JsonField() - } - - -@base.SenlinObjectRegistry.register -class PolicyValidateRequest(base.SenlinObject): - - fields = { - 'policy': fields.ObjectField('PolicyValidateRequestBody') - } diff --git a/senlin/objects/requests/policy_type.py b/senlin/objects/requests/policy_type.py deleted file mode 100644 index d97780b79..000000000 --- a/senlin/objects/requests/policy_type.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in 
compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class PolicyTypeGetRequest(base.SenlinObject): - - fields = { - 'type_name': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class PolicyTypeListRequest(base.SenlinObject): - - fields = {} diff --git a/senlin/objects/requests/profile_type.py b/senlin/objects/requests/profile_type.py deleted file mode 100644 index 2a525a5c2..000000000 --- a/senlin/objects/requests/profile_type.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class ProfileTypeGetRequest(base.SenlinObject): - - fields = { - 'type_name': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class ProfileTypeListRequest(base.SenlinObject): - - fields = {} - - -@base.SenlinObjectRegistry.register -class ProfileTypeOpListRequest(base.SenlinObject): - - fields = { - 'type_name': fields.StringField() - } diff --git a/senlin/objects/requests/profiles.py b/senlin/objects/requests/profiles.py deleted file mode 100644 index 25bbe42dc..000000000 --- a/senlin/objects/requests/profiles.py +++ /dev/null @@ -1,97 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
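The @base.SenlinObjectRegistry.register decorator used throughout these modules is a class-registry pattern: each request class is recorded by name so that string references such as fields.ObjectField('PolicyCreateRequestBody') can be resolved later. A toy version of the idea (not the oslo implementation):

    class Registry(object):
        _classes = {}

        @classmethod
        def register(cls, obj_cls):
            cls._classes[obj_cls.__name__] = obj_cls
            return obj_cls

        @classmethod
        def resolve(cls, name):
            return cls._classes[name]

    @Registry.register
    class PolicyTypeGetRequest(object):
        fields = {'type_name': str}

    assert Registry.resolve('PolicyTypeGetRequest') is PolicyTypeGetRequest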
- -from senlin.common import consts -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class ProfileCreateRequestBody(base.SenlinObject): - - fields = { - 'name': fields.NameField(), - 'spec': fields.JsonField(), - 'metadata': fields.JsonField(nullable=True, default={}), - } - - -@base.SenlinObjectRegistry.register -class ProfileCreateRequest(base.SenlinObject): - - fields = { - 'profile': fields.ObjectField('ProfileCreateRequestBody') - } - - -@base.SenlinObjectRegistry.register -class ProfileListRequest(base.SenlinObject): - - fields = { - 'name': fields.ListOfStringsField(nullable=True), - 'type': fields.ListOfStringsField(nullable=True), - 'limit': fields.NonNegativeIntegerField(nullable=True), - 'marker': fields.UUIDField(nullable=True), - 'sort': fields.SortField( - valid_keys=list(consts.PROFILE_SORT_KEYS), nullable=True), - 'project_safe': fields.FlexibleBooleanField(default=True), - } - - -@base.SenlinObjectRegistry.register -class ProfileGetRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class ProfileUpdateRequestBody(base.SenlinObject): - - fields = { - 'name': fields.NameField(nullable=True), - 'metadata': fields.JsonField(nullable=True) - } - - -@base.SenlinObjectRegistry.register -class ProfileUpdateRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'profile': fields.ObjectField('ProfileUpdateRequestBody'), - } - - -@base.SenlinObjectRegistry.register -class ProfileDeleteRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class ProfileValidateRequestBody(base.SenlinObject): - - fields = { - 'spec': fields.JsonField() - } - - -@base.SenlinObjectRegistry.register -class ProfileValidateRequest(base.SenlinObject): - - fields = { - 'profile': fields.ObjectField('ProfileValidateRequestBody') - } diff --git a/senlin/objects/requests/receivers.py b/senlin/objects/requests/receivers.py deleted file mode 100644 index 0bae977ab..000000000 --- a/senlin/objects/requests/receivers.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
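The Request/RequestBody split above mirrors the shape of the API payload: ProfileCreateRequest carries a single 'profile' key whose value maps onto ProfileCreateRequestBody. A sketch of the parsing this implies; parse_profile_create() is illustrative glue, not Senlin code:

    import json

    def parse_profile_create(raw):
        payload = json.loads(raw)
        body = payload['profile']          # ProfileCreateRequest.fields['profile']
        for required in ('name', 'spec'):  # NameField()/JsonField() are required
            if required not in body:
                raise ValueError('%s is required' % required)
        body.setdefault('metadata', {})    # JsonField(nullable=True, default={})
        return body

    raw = '{"profile": {"name": "web", "spec": {"type": "os.nova.server"}}}'
    print(parse_profile_create(raw))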
- -from oslo_utils import versionutils - -from senlin.common import consts -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class ReceiverCreateRequestBody(base.SenlinObject): - - fields = { - 'name': fields.NameField(), - 'type': fields.ReceiverTypeField(), - 'cluster_id': fields.StringField(nullable=True), - 'action': fields.ClusterActionNameField(nullable=True), - 'actor': fields.JsonField(nullable=True, default={}), - 'params': fields.JsonField(nullable=True, default={}) - } - - -@base.SenlinObjectRegistry.register -class ReceiverCreateRequest(base.SenlinObject): - - fields = { - 'receiver': fields.ObjectField('ReceiverCreateRequestBody') - } - - -@base.SenlinObjectRegistry.register -class ReceiverListRequest(base.SenlinObject): - - # VERSION 1.0: Initial version - # VERSION 1.1: Add field 'user' - VERSION = '1.1' - VERSION_MAP = { - '1.4': '1.1', - } - - fields = { - 'name': fields.ListOfStringsField(nullable=True), - 'type': fields.ListOfEnumField( - valid_values=list(consts.RECEIVER_TYPES), nullable=True), - 'action': fields.ListOfEnumField( - valid_values=list(consts.CLUSTER_ACTION_NAMES), nullable=True), - 'cluster_id': fields.ListOfStringsField(nullable=True), - 'limit': fields.NonNegativeIntegerField(nullable=True), - 'marker': fields.UUIDField(nullable=True), - 'sort': fields.SortField( - valid_keys=list(consts.RECEIVER_SORT_KEYS), nullable=True), - 'project_safe': fields.FlexibleBooleanField(default=True), - 'user': fields.ListOfStringsField(nullable=True), - } - - def obj_make_compatible(self, primitive, target_version): - super(ReceiverListRequest, self).obj_make_compatible( - primitive, target_version) - target_version = versionutils.convert_version_to_tuple( - target_version) - if target_version < (1, 1): - if 'user' in primitive['senlin_object.data']: - del primitive['senlin_object.data']['user'] - - -@base.SenlinObjectRegistry.register -class ReceiverGetRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class ReceiverUpdateRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'name': fields.NameField(nullable=True), - 'action': fields.ClusterActionNameField(nullable=True), - 'params': fields.JsonField(nullable=True, default={}) - } - - -@base.SenlinObjectRegistry.register -class ReceiverDeleteRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } - - -@base.SenlinObjectRegistry.register -class ReceiverNotifyRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField() - } diff --git a/senlin/objects/requests/webhooks.py b/senlin/objects/requests/webhooks.py deleted file mode 100644 index 2e3c7a61b..000000000 --- a/senlin/objects/requests/webhooks.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
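The 'project_safe' filter above is a FlexibleBooleanField, meaning string forms of booleans arriving through the API are tolerated. oslo.utils ships this coercion; a small demonstration (assuming only that oslo.utils is installed, as it is for Senlin):

    from oslo_utils import strutils

    for raw in ('True', 't', 'yes', '0', 'f'):
        # strict=True raises ValueError on unrecognized strings
        print(raw, '->', strutils.bool_from_string(raw, strict=True))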
- -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class WebhookTriggerRequestParamsInBody(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'body': fields.JsonField(nullable=True, default={}) - } - - -@base.SenlinObjectRegistry.register -class WebhookTriggerRequest(base.SenlinObject): - - fields = { - 'identity': fields.StringField(), - 'body': fields.ObjectField('WebhookTriggerRequestBody') - } - - -@base.SenlinObjectRegistry.register -class WebhookTriggerRequestBody(base.SenlinObject): - - fields = { - 'params': fields.JsonField(nullable=True, default={}) - } diff --git a/senlin/objects/service.py b/senlin/objects/service.py deleted file mode 100644 index 8a00195a1..000000000 --- a/senlin/objects/service.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Service object.""" - -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import fields - - -@base.SenlinObjectRegistry.register -class Service(base.SenlinObject, base.VersionedObjectDictCompat): - """Senlin service object.""" - - fields = { - 'id': fields.UUIDField(), - 'host': fields.StringField(), - 'binary': fields.StringField(), - 'topic': fields.StringField(), - 'disabled': fields.BooleanField(), - 'disabled_reason': fields.StringField(nullable=True), - 'created_at': fields.DateTimeField(), - 'updated_at': fields.DateTimeField(), - } - - @classmethod - def create(cls, context, service_id, host=None, binary=None, topic=None): - obj = db_api.service_create(service_id, host=host, binary=binary, - topic=topic) - return cls._from_db_object(context, cls(context), obj) - - @classmethod - def get(cls, context, service_id): - obj = db_api.service_get(service_id) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def get_all(cls, context): - objs = db_api.service_get_all() - return [cls._from_db_object(context, cls(), obj) for obj in objs] - - @classmethod - def cleanup_all_expired(cls, binary): - db_api.service_cleanup_all_expired(binary) - - @classmethod - def update(cls, context, obj_id, values=None): - obj = db_api.service_update(obj_id, values=values) - return cls._from_db_object(context, cls(), obj) - - @classmethod - def delete(cls, obj_id): - db_api.service_delete(obj_id) - - @classmethod - def gc_by_engine(cls, engine_id): - db_api.gc_by_engine(engine_id) diff --git a/senlin/policies/__init__.py b/senlin/policies/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/policies/affinity_policy.py b/senlin/policies/affinity_policy.py deleted file mode 100644 index 1c9b6a81b..000000000 --- a/senlin/policies/affinity_policy.py +++ /dev/null @@ -1,305 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Policy for placing nodes based on Nova server groups. - -NOTE: For full documentation about how the affinity policy works, check: -https://docs.openstack.org/senlin/latest/contributor/policies/affinity_v1.html -""" - -import re - -from oslo_log import log as logging -from senlin.common import constraints -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import scaleutils as su -from senlin.common import schema -from senlin.common import utils -from senlin.objects import cluster_policy as cpo -from senlin.policies import base - - -LOG = logging.getLogger(__name__) - - -class AffinityPolicy(base.Policy): - """Policy for placing members of a cluster based on server groups. - - This policy is expected to be enforced before new member(s) are added to - an existing cluster. - """ - VERSION = '1.0' - VERSIONS = { - '1.0': [ - {'status': consts.SUPPORTED, 'since': '2016.10'} - ] - } - PRIORITY = 300 - - TARGET = [ - ('BEFORE', consts.CLUSTER_SCALE_OUT), - ('BEFORE', consts.CLUSTER_RESIZE), - ('BEFORE', consts.NODE_CREATE), - ] - - PROFILE_TYPE = [ - 'os.nova.server-1.0', - ] - - KEYS = ( - SERVER_GROUP, AVAILABILITY_ZONE, ENABLE_DRS_EXTENSION, - ) = ( - 'servergroup', 'availability_zone', 'enable_drs_extension', - ) - - _GROUP_KEYS = ( - GROUP_NAME, GROUP_POLICIES, - ) = ( - 'name', 'policies', - ) - - _POLICIES_VALUES = ( - # NOTE: soft policies are supported from compute micro version 2.15 - AFFINITY, SOFT_AFFINITY, ANTI_AFFINITY, SOFT_ANTI_AFFINITY, - ) = ( - 'affinity', 'soft-affinity', 'anti-affinity', 'soft-anti-affinity', - ) - - properties_schema = { - SERVER_GROUP: schema.Map( - _('Properties of the VM server group'), - schema={ - GROUP_NAME: schema.String( - _('The name of the server group'), - ), - GROUP_POLICIES: schema.String( - _('The server group policies.'), - default=ANTI_AFFINITY, - constraints=[ - constraints.AllowedValues(_POLICIES_VALUES), - ], - ), - }, - ), - AVAILABILITY_ZONE: schema.String( - _('Name of the availability zone to place the nodes.'), - ), - ENABLE_DRS_EXTENSION: schema.Boolean( - _('Enable vSphere DRS extension.'), - default=False, - ), - } - - def __init__(self, name, spec, **kwargs): - super(AffinityPolicy, self).__init__(name, spec, **kwargs) - - self.enable_drs = self.properties.get(self.ENABLE_DRS_EXTENSION) - - def validate(self, context, validate_props=False): - super(AffinityPolicy, self).validate(context, validate_props) - - if not validate_props: - return True - - az_name = self.properties.get(self.AVAILABILITY_ZONE) - if az_name: - nc = self.nova(context.user_id, context.project_id) - valid_azs = nc.validate_azs([az_name]) - if not valid_azs: - msg = _("The specified %(key)s '%(value)s' could not be " - "found.") % {'key': self.AVAILABILITY_ZONE, - 'value': az_name} - raise exc.InvalidSpec(message=msg) - - return True - - def attach(self, cluster, enabled=True): - """Routine to be invoked when policy is to be attached to a cluster. - - :param cluster: The cluster to which the policy is being attached. 
- :param enabled: The attached cluster policy is enabled or disabled. - :returns: When the operation was successful, returns a tuple (True, - message); otherwise, return a tuple (False, error). - """ - res, data = super(AffinityPolicy, self).attach(cluster) - if res is False: - return False, data - - data = {'inherited_group': False} - nc = self.nova(cluster.user, cluster.project) - group = self.properties.get(self.SERVER_GROUP) - - # guess servergroup name - group_name = group.get(self.GROUP_NAME, None) - - if group_name is None: - profile = cluster.rt['profile'] - if 'scheduler_hints' in profile.spec: - hints = profile.spec['scheduler_hints'] - group_name = hints.get('group', None) - - if group_name: - try: - server_group = nc.server_group_find(group_name, True) - except exc.InternalError as ex: - msg = _("Failed in retrieving servergroup '%s'." - ) % group_name - LOG.exception('%(msg)s: %(ex)s', {'msg': msg, 'ex': ex}) - return False, msg - - if server_group: - # Check if the policies match - policies = group.get(self.GROUP_POLICIES) - if policies and policies != server_group.policies[0]: - msg = _("Policies specified (%(specified)s) doesn't match " - "that of the existing servergroup (%(existing)s)." - ) % {'specified': policies, - 'existing': server_group.policies[0]} - return False, msg - - data['servergroup_id'] = server_group.id - data['inherited_group'] = True - - if not data['inherited_group']: - # create a random name if necessary - if not group_name: - group_name = 'server_group_%s' % utils.random_name() - try: - server_group = nc.server_group_create( - name=group_name, - policies=[group.get(self.GROUP_POLICIES)]) - except Exception as ex: - msg = _('Failed in creating servergroup.') - LOG.exception('%(msg)s: %(ex)s', {'msg': msg, 'ex': ex}) - return False, msg - - data['servergroup_id'] = server_group.id - - policy_data = self._build_policy_data(data) - - return True, policy_data - - def detach(self, cluster): - """Routine to be called when the policy is detached from a cluster. - - :param cluster: The cluster from which the policy is to be detached. - :returns: When the operation was successful, returns a tuple of - (True, data) where the data contains references to the - resources created; otherwise returns a tuple of (False, - error) where the err contains an error message. - """ - - reason = _('Servergroup resource deletion succeeded.') - - ctx = context.get_admin_context() - binding = cpo.ClusterPolicy.get(ctx, cluster.id, self.id) - if not binding or not binding.data: - return True, reason - - policy_data = self._extract_policy_data(binding.data) - if not policy_data: - return True, reason - - group_id = policy_data.get('servergroup_id', None) - inherited_group = policy_data.get('inherited_group', False) - - if group_id and not inherited_group: - try: - nc = self.nova(cluster.user, cluster.project) - nc.server_group_delete(group_id) - except Exception as ex: - msg = _('Failed in deleting servergroup.') - LOG.exception('%(msg)s: %(ex)s', {'msg': msg, 'ex': ex}) - return False, msg - - return True, reason - - def pre_op(self, cluster_id, action): - """Routine to be called before target action is executed. - - This policy annotates the node with a server group ID before the - node is actually created. For vSphere DRS, it is equivalent to the - selection of vSphere host (cluster). - - :param cluster_id: ID of the cluster on which the relevant action - is to be executed. - :param action: The action object that triggered this operation. - :returns: Nothing. 
- """ - zone_name = self.properties.get(self.AVAILABILITY_ZONE) - if not zone_name and self.enable_drs: - # we make a reasonable guess of the zone name for vSphere - # support because the zone name is required in that case. - zone_name = 'nova' - - # we respect other policies decisions (if any) and fall back to the - # action inputs if no hints found. - pd = action.data.get('creation', None) - if pd is not None: - count = pd.get('count', 1) - elif action.action == consts.CLUSTER_SCALE_OUT: - count = action.inputs.get('count', 1) - elif action.action == consts.NODE_CREATE: - count = 1 - else: # CLUSTER_RESIZE - cluster = action.entity - current = len(cluster.nodes) - su.parse_resize_params(action, cluster, current) - if 'creation' not in action.data: - return - count = action.data['creation']['count'] - - cp = cpo.ClusterPolicy.get(action.context, cluster_id, self.id) - policy_data = self._extract_policy_data(cp.data) - pd_entry = {'servergroup': policy_data['servergroup_id']} - - # special handling for vSphere DRS case where we need to find out - # the name of the vSphere host which has DRS enabled. - if self.enable_drs: - obj = action.entity - nc = self.nova(obj.user, obj.project) - - hypervisors = nc.hypervisor_list() - hv_id = '' - pattern = re.compile(r'.*drs*', re.I) - for hypervisor in hypervisors: - match = pattern.match(hypervisor.hypervisor_hostname) - if match: - hv_id = hypervisor.id - break - - if not hv_id: - action.data['status'] = base.CHECK_ERROR - action.data['status_reason'] = _('No suitable vSphere host ' - 'is available.') - action.store(action.context) - return - - hv_info = nc.hypervisor_get(hv_id) - hostname = hv_info['service']['host'] - pd_entry['zone'] = ":".join([zone_name, hostname]) - - elif zone_name: - pd_entry['zone'] = zone_name - - pd = { - 'count': count, - 'placements': [pd_entry] * count, - } - action.data.update({'placement': pd}) - action.store(action.context) - - return diff --git a/senlin/policies/base.py b/senlin/policies/base.py deleted file mode 100644 index dd285f11b..000000000 --- a/senlin/policies/base.py +++ /dev/null @@ -1,373 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_context import context as oslo_context -from oslo_utils import reflection -from oslo_utils import timeutils - -from senlin.common import context as senlin_context -from senlin.common import exception -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.common import utils -from senlin.drivers import base as driver -from senlin.engine import environment -from senlin.objects import credential as co -from senlin.objects import policy as po - -CHECK_RESULTS = ( - CHECK_OK, CHECK_ERROR, CHECK_NONE -) = ( - 'OK', 'ERROR', 'NONE' -) - - -class Policy(object): - """Base class for policies.""" - - VERSIONS = {} - - PROFILE_TYPE = 'ANY' - - KEYS = ( - TYPE, VERSION, DESCRIPTION, PROPERTIES, - ) = ( - 'type', 'version', 'description', 'properties', - ) - - spec_schema = { - TYPE: schema.String( - _('Name of the policy type.'), - required=True, - ), - VERSION: schema.String( - _('Version number of the policy type.'), - required=True, - ), - DESCRIPTION: schema.String( - _('A text description of policy.'), - default='', - ), - PROPERTIES: schema.Map( - _('Properties for the policy.'), - required=True, - ) - } - - properties_schema = {} - - def __new__(cls, name, spec, **kwargs): - """Create a new policy of the appropriate class. - - :param name: The name for the policy. - :param spec: A dictionary containing the spec for the policy. - :param kwargs: Keyword arguments for policy creation. - :returns: An instance of a specific sub-class of Policy. - """ - type_name, version = schema.get_spec_version(spec) - type_str = "-".join([type_name, version]) - - if cls != Policy: - PolicyClass = cls - else: - PolicyClass = environment.global_env().get_policy(type_str) - - return super(Policy, cls).__new__(PolicyClass) - - def __init__(self, name, spec, **kwargs): - """Initialize a policy instance. - - :param name: The name for the policy. - :param spec: A dictionary containing the detailed policy spec. - :param kwargs: Keyword arguments for initializing the policy. - :returns: An instance of a specific sub-class of Policy. - """ - - type_name, version = schema.get_spec_version(spec) - type_str = "-".join([type_name, version]) - self.name = name - self.spec = spec - - self.id = kwargs.get('id', None) - self.type = kwargs.get('type', type_str) - self.user = kwargs.get('user') - self.project = kwargs.get('project') - self.domain = kwargs.get('domain') - self.data = kwargs.get('data', {}) - - self.created_at = kwargs.get('created_at', None) - self.updated_at = kwargs.get('updated_at', None) - - self.spec_data = schema.Spec(self.spec_schema, spec) - self.properties = schema.Spec( - self.properties_schema, - self.spec.get(self.PROPERTIES, {}), - version) - - self.singleton = True - self._novaclient = None - self._keystoneclient = None - self._networkclient = None - self._octaviaclient = None - self._lbaasclient = None - - @classmethod - def _from_object(cls, policy): - """Construct a policy from a Policy object. - - @param cls: The target class. - @param policy: A policy object. - """ - - kwargs = { - 'id': policy.id, - 'type': policy.type, - 'user': policy.user, - 'project': policy.project, - 'domain': policy.domain, - 'created_at': policy.created_at, - 'updated_at': policy.updated_at, - 'data': policy.data, - } - - return cls(policy.name, policy.spec, **kwargs) - - @classmethod - def load(cls, context, policy_id=None, db_policy=None, project_safe=True): - """Retrieve and reconstruct a policy object from DB. - - :param context: DB context for object retrieval. 
- :param policy_id: Optional parameter specifying the ID of policy. - :param db_policy: Optional parameter referencing a policy DB object. - :param project_safe: Optional parameter specifying whether only - policies belonging to the context.project will be - loaded. - :returns: An object of the proper policy class. - """ - if db_policy is None: - db_policy = po.Policy.get(context, policy_id, - project_safe=project_safe) - if db_policy is None: - raise exception.ResourceNotFound(type='policy', id=policy_id) - - return cls._from_object(db_policy) - - @classmethod - def delete(cls, context, policy_id): - po.Policy.delete(context, policy_id) - - def store(self, context): - """Store the policy object into database table.""" - timestamp = timeutils.utcnow(True) - - values = { - 'name': self.name, - 'type': self.type, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'spec': self.spec, - 'data': self.data, - } - - if self.id is not None: - self.updated_at = timestamp - values['updated_at'] = timestamp - po.Policy.update(context, self.id, values) - else: - self.created_at = timestamp - values['created_at'] = timestamp - policy = po.Policy.create(context, values) - self.id = policy.id - - return self.id - - def validate(self, context, validate_props=False): - """Validate the schema and the data provided.""" - self.spec_data.validate() - self.properties.validate() - - @classmethod - def get_schema(cls): - return dict((name, dict(schema)) - for name, schema in cls.properties_schema.items()) - - def _build_policy_data(self, data): - clsname = reflection.get_class_name(self, fully_qualified=False) - version = self.VERSION - result = { - clsname: { - 'version': version, - 'data': data, - } - } - return result - - def _extract_policy_data(self, policy_data): - clsname = reflection.get_class_name(self, fully_qualified=False) - if clsname not in policy_data: - return None - data = policy_data.get(clsname) - if 'version' not in data or data['version'] != self.VERSION: - return None - - return data.get('data', None) - - def _build_conn_params(self, user, project): - """Build trust-based connection parameters. - - :param user: the user for which the trust will be checked. - :param project: the project for which the trust will be checked. - """ - service_creds = senlin_context.get_service_credentials() - params = { - 'username': service_creds.get('username'), - 'password': service_creds.get('password'), - 'auth_url': service_creds.get('auth_url'), - 'user_domain_name': service_creds.get('user_domain_name'), - 'project_domain_name': service_creds.get('project_domain_name'), - 'verify': service_creds.get('verify'), - 'interface': service_creds.get('interface'), - } - - cred = co.Credential.get(oslo_context.get_current(), user, project) - if cred is None: - raise exception.TrustNotFound(trustor=user) - params['trust_id'] = cred.cred['openstack']['trust'] - - return params - - def keystone(self, user, project): - """Construct keystone client based on object. - - :param user: The ID of the requesting user. - :param project: The ID of the requesting project. - :returns: A reference to the keystone client. - """ - if self._keystoneclient is not None: - return self._keystoneclient - params = self._build_conn_params(user, project) - self._keystoneclient = driver.SenlinDriver().identity(params) - return self._keystoneclient - - def nova(self, user, project): - """Construct nova client based on user and project. - - :param user: The ID of the requesting user. 
- :param project: The ID of the requesting project. - :returns: A reference to the nova client. - """ - if self._novaclient is not None: - return self._novaclient - - params = self._build_conn_params(user, project) - self._novaclient = driver.SenlinDriver().compute(params) - return self._novaclient - - def network(self, user, project): - """Construct network client based on user and project. - - :param user: The ID of the requesting user. - :param project: The ID of the requesting project. - :returns: A reference to the network client. - """ - if self._networkclient is not None: - return self._networkclient - - params = self._build_conn_params(user, project) - self._networkclient = driver.SenlinDriver().network(params) - return self._networkclient - - def octavia(self, user, project): - """Construct octavia client based on user and project. - - :param user: The ID of the requesting user. - :param project: The ID of the requesting project. - :returns: A reference to the octavia client. - """ - if self._octaviaclient is not None: - return self._octaviaclient - - params = self._build_conn_params(user, project) - self._octaviaclient = driver.SenlinDriver().octavia(params) - - return self._octaviaclient - - def lbaas(self, user, project): - """Construct LB service client based on user and project. - - :param user: The ID of the requesting user. - :param project: The ID of the requesting project. - :returns: A reference to the LB service client. - """ - if self._lbaasclient is not None: - return self._lbaasclient - - params = self._build_conn_params(user, project) - - self._lbaasclient = driver.SenlinDriver().loadbalancing(params) - return self._lbaasclient - - def attach(self, cluster, enabled=True): - """Method to be invoked before policy is attached to a cluster. - - :param cluster: The cluster to which the policy is being attached to. - :param enabled: The attached cluster policy is enabled or disabled. - :returns: (True, message) if the operation is successful, or (False, - error) otherwise. - """ - if self.PROFILE_TYPE == ['ANY']: - return True, None - - profile = cluster.rt['profile'] - if profile.type not in self.PROFILE_TYPE: - error = _('Policy not applicable on profile type: ' - '%s') % profile.type - return False, error - - return True, None - - def detach(self, cluster): - """Method to be invoked before policy is detached from a cluster.""" - return True, None - - def need_check(self, target, action): - if getattr(self, 'TARGET', None) is None: - return True - - if (target, action.action) in self.TARGET: - return True - else: - return False - - def pre_op(self, cluster_id, action): - """A method that will be invoked before an action execution.""" - return - - def post_op(self, cluster_id, action): - """A method that will be invoked after an action execution.""" - return - - def to_dict(self): - pb_dict = { - 'id': self.id, - 'name': self.name, - 'type': self.type, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'spec': self.spec, - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at), - 'data': self.data, - } - return pb_dict diff --git a/senlin/policies/batch_policy.py b/senlin/policies/batch_policy.py deleted file mode 100644 index cc65333dd..000000000 --- a/senlin/policies/batch_policy.py +++ /dev/null @@ -1,171 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Policy for batching operations on a cluster. - -NOTE: How update policy works - -Input: - cluster: the cluster whose nodes are to be updated. -Output: - stored in action.data: A dictionary containing a detailed update schedule. - { - 'status': 'OK', - 'update': { - 'pause_time': 2, - 'plan': [{ - 'node-id-1', - 'node-id-2', - }, { - 'node-id-3', - 'node-id-4', - }, { - 'node-id-5', - } - ] - } - } -""" -import math - -from senlin.common import consts -from senlin.common.i18n import _ -from senlin.common import scaleutils as su -from senlin.common import schema -from senlin.policies import base - - -class BatchPolicy(base.Policy): - """Policy for batching the operations on a cluster's nodes.""" - - VERSION = '1.0' - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2017.02'} - ] - } - PRIORITY = 200 - - TARGET = [ - ('BEFORE', consts.CLUSTER_UPDATE), - ] - - PROFILE_TYPE = [ - 'ANY' - ] - - KEYS = ( - MIN_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME, - ) = ( - 'min_in_service', 'max_batch_size', 'pause_time', - ) - - properties_schema = { - MIN_IN_SERVICE: schema.Integer( - _('Minimum number of nodes in service when performing updates.'), - default=1, - ), - MAX_BATCH_SIZE: schema.Integer( - _('Maximum number of nodes that will be updated in parallel.'), - default=-1, - ), - PAUSE_TIME: schema.Integer( - _('Interval in seconds between update batches if any.'), - default=60, - ) - } - - def __init__(self, name, spec, **kwargs): - super(BatchPolicy, self).__init__(name, spec, **kwargs) - - self.min_in_service = self.properties[self.MIN_IN_SERVICE] - self.max_batch_size = self.properties[self.MAX_BATCH_SIZE] - self.pause_time = self.properties[self.PAUSE_TIME] - - def _get_batch_size(self, total): - """Get batch size for update operation. - - :param total: Total number of nodes. - :returns: Size of each batch. - """ - - # if the number of nodes less than min_in_service, - # we divided it to 2 batches - diff = int(math.ceil(float(total) / 2)) - if total > self.min_in_service: - diff = total - self.min_in_service - - # max_batch_size is -1 if not specified - if self.max_batch_size == -1 or diff < self.max_batch_size: - batch_size = diff - else: - batch_size = self.max_batch_size - - return batch_size - - def _pick_nodes(self, nodes, batch_size): - """Select nodes based on size and number of batches. - - :param nodes: list of node objects. - :param batch_size: the number of nodes of each batch. - :returns: a list of sets containing the nodes' IDs we - selected based on the input params. 
- """ - candidates, good = su.filter_error_nodes(nodes) - result = [] - # NOTE: we leave the nodes known to be good (ACTIVE) at the end of the - # list so that we have a better chance to ensure 'min_in_service' - # constraint - for n in good: - candidates.append(n.id) - - for start in range(0, len(candidates), batch_size): - end = start + batch_size - result.append(set(candidates[start:end])) - - return result - - def _create_plan(self, action): - nodes = action.entity.nodes - - plan = {'pause_time': self.pause_time} - if len(nodes) == 0: - plan['plan'] = [] - return True, plan - - batch_size = self._get_batch_size(len(nodes)) - plan['plan'] = self._pick_nodes(nodes, batch_size) - - return True, plan - - def pre_op(self, cluster_id, action): - - pd = { - 'status': base.CHECK_OK, - 'reason': _('Batching request validated.'), - } - # for updating - result, value = self._create_plan(action) - - if result is False: - pd = { - 'status': base.CHECK_ERROR, - 'reason': value, - } - else: - pd['update'] = value - - action.data.update(pd) - action.store(action.context) - - return diff --git a/senlin/policies/deletion_policy.py b/senlin/policies/deletion_policy.py deleted file mode 100644 index 075e4129c..000000000 --- a/senlin/policies/deletion_policy.py +++ /dev/null @@ -1,273 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Policy for deleting node(s) from a cluster. - -NOTE: For full documentation about how the deletion policy works, check: -https://docs.openstack.org/senlin/latest/contributor/policies/deletion_v1.html -""" -from oslo_log import log as logging - -from senlin.common import constraints -from senlin.common import consts -from senlin.common.i18n import _ -from senlin.common import scaleutils as su -from senlin.common import schema -from senlin.policies import base - -LOG = logging.getLogger(__name__) - - -class DeletionPolicy(base.Policy): - """Policy for choosing victim node(s) from a cluster for deletion. - - This policy is enforced when nodes are to be removed from a cluster. - It will yield an ordered list of candidates for deletion based on user - specified criteria. 
- """ - - VERSION = '1.1' - VERSIONS = { - '1.0': [ - {'status': consts.SUPPORTED, 'since': '2016.04'} - ], - '1.1': [ - {'status': consts.SUPPORTED, 'since': '2018.01'} - ], - } - PRIORITY = 400 - - KEYS = ( - CRITERIA, DESTROY_AFTER_DELETION, GRACE_PERIOD, - REDUCE_DESIRED_CAPACITY, HOOKS, TYPE, PARAMS, QUEUE, URL, TIMEOUT - ) = ( - 'criteria', 'destroy_after_deletion', 'grace_period', - 'reduce_desired_capacity', 'hooks', 'type', 'params', 'queue', 'url', - 'timeout' - ) - - CRITERIA_VALUES = ( - OLDEST_FIRST, OLDEST_PROFILE_FIRST, YOUNGEST_FIRST, RANDOM, - ) = ( - 'OLDEST_FIRST', 'OLDEST_PROFILE_FIRST', 'YOUNGEST_FIRST', 'RANDOM', - ) - - HOOK_VALUES = ( - ZAQAR, WEBHOOK - ) = ( - 'zaqar', 'webhook', - ) - - TARGET = [ - ('BEFORE', consts.CLUSTER_SCALE_IN), - ('BEFORE', consts.CLUSTER_DEL_NODES), - ('BEFORE', consts.CLUSTER_RESIZE), - ('BEFORE', consts.NODE_DELETE), - ] - - PROFILE_TYPE = [ - 'ANY' - ] - - properties_schema = { - CRITERIA: schema.String( - _('Criteria used in selecting candidates for deletion'), - default=RANDOM, - constraints=[ - constraints.AllowedValues(CRITERIA_VALUES), - ] - ), - DESTROY_AFTER_DELETION: schema.Boolean( - _('Whether a node should be completely destroyed after ' - 'deletion. Default to True'), - default=True, - ), - GRACE_PERIOD: schema.Integer( - _('Number of seconds before real deletion happens.'), - default=0, - ), - REDUCE_DESIRED_CAPACITY: schema.Boolean( - _('Whether the desired capacity of the cluster should be ' - 'reduced along the deletion. Default to True.'), - default=True, - ), - HOOKS: schema.Map( - _("Lifecycle hook properties"), - schema={ - TYPE: schema.String( - _("Type of lifecycle hook"), - default=ZAQAR, - constraints=[ - constraints.AllowedValues(HOOK_VALUES), - ] - ), - PARAMS: schema.Map( - schema={ - QUEUE: schema.String( - _("Zaqar queue to receive lifecycle hook message"), - default="", - ), - URL: schema.String( - _("Url sink to which to send lifecycle hook " - "message"), - default="", - ), - }, - default={} - ), - TIMEOUT: schema.Integer( - _('Number of seconds before actual deletion happens.'), - default=0, - ), - }, - default={} - ) - } - - def __init__(self, name, spec, **kwargs): - super(DeletionPolicy, self).__init__(name, spec, **kwargs) - - self.criteria = self.properties[self.CRITERIA] - self.grace_period = self.properties[self.GRACE_PERIOD] - self.destroy_after_deletion = self.properties[ - self.DESTROY_AFTER_DELETION] - self.reduce_desired_capacity = self.properties[ - self.REDUCE_DESIRED_CAPACITY] - self.hooks = self.properties[self.HOOKS] - - def _victims_by_regions(self, cluster, regions): - victims = [] - for region in sorted(regions.keys()): - count = regions[region] - nodes = cluster.nodes_by_region(region) - if self.criteria == self.RANDOM: - candidates = su.nodes_by_random(nodes, count) - elif self.criteria == self.OLDEST_PROFILE_FIRST: - candidates = su.nodes_by_profile_age(nodes, count) - elif self.criteria == self.OLDEST_FIRST: - candidates = su.nodes_by_age(nodes, count, True) - else: - candidates = su.nodes_by_age(nodes, count, False) - - victims.extend(candidates) - - return victims - - def _victims_by_zones(self, cluster, zones): - victims = [] - for zone in sorted(zones.keys()): - count = zones[zone] - nodes = cluster.nodes_by_zone(zone) - if self.criteria == self.RANDOM: - candidates = su.nodes_by_random(nodes, count) - elif self.criteria == self.OLDEST_PROFILE_FIRST: - candidates = su.nodes_by_profile_age(nodes, count) - elif self.criteria == self.OLDEST_FIRST: - candidates = 
su.nodes_by_age(nodes, count, True) - else: - candidates = su.nodes_by_age(nodes, count, False) - - victims.extend(candidates) - - return victims - - def _update_action(self, action, victims): - pd = action.data.get('deletion', {}) - pd['count'] = len(victims) - pd['candidates'] = victims - pd['destroy_after_deletion'] = self.destroy_after_deletion - pd['grace_period'] = self.grace_period - pd['reduce_desired_capacity'] = self.reduce_desired_capacity - action.data.update({ - 'status': base.CHECK_OK, - 'reason': _('Candidates generated'), - 'deletion': pd - }) - action.store(action.context) - - def pre_op(self, cluster_id, action): - """Choose victims that can be deleted. - - :param cluster_id: ID of the cluster to be handled. - :param action: The action object that triggered this policy. - """ - - victims = action.inputs.get('candidates', []) - if len(victims) > 0: - self._update_action(action, victims) - return - - if action.action == consts.NODE_DELETE: - self._update_action(action, [action.entity.id]) - return - - cluster = action.entity - regions = None - zones = None - - hooks_data = self.hooks - action.data.update({'status': base.CHECK_OK, - 'reason': _('lifecycle hook parameters saved'), - 'hooks': hooks_data}) - action.store(action.context) - - deletion = action.data.get('deletion', {}) - if deletion: - # there are policy decisions - count = deletion['count'] - regions = deletion.get('regions', None) - zones = deletion.get('zones', None) - # No policy decision, check action itself: SCALE_IN - elif action.action == consts.CLUSTER_SCALE_IN: - count = action.inputs.get('count', 1) - - # No policy decision, check action itself: RESIZE - else: - current = len(cluster.nodes) - res, reason = su.parse_resize_params(action, cluster, current) - if res == base.CHECK_ERROR: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = reason - LOG.error(reason) - return - - if 'deletion' not in action.data: - return - count = action.data['deletion']['count'] - - # Cross-region - if regions: - victims = self._victims_by_regions(cluster, regions) - self._update_action(action, victims) - return - - # Cross-AZ - if zones: - victims = self._victims_by_zones(cluster, zones) - self._update_action(action, victims) - return - - if count > len(cluster.nodes): - count = len(cluster.nodes) - - if self.criteria == self.RANDOM: - victims = su.nodes_by_random(cluster.nodes, count) - elif self.criteria == self.OLDEST_PROFILE_FIRST: - victims = su.nodes_by_profile_age(cluster.nodes, count) - elif self.criteria == self.OLDEST_FIRST: - victims = su.nodes_by_age(cluster.nodes, count, True) - else: - victims = su.nodes_by_age(cluster.nodes, count, False) - - self._update_action(action, victims) - return diff --git a/senlin/policies/health_policy.py b/senlin/policies/health_policy.py deleted file mode 100644 index bb1fa6bc5..000000000 --- a/senlin/policies/health_policy.py +++ /dev/null @@ -1,517 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
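The criteria dispatch above funnels into scaleutils helpers such as nodes_by_age(). Its contract can be approximated as below (toy data and a simplified helper, not Senlin's implementation):

    def nodes_by_age(nodes, count, old_first):
        ordered = sorted(nodes, key=lambda n: n['created_at'],
                         reverse=not old_first)
        return [n['id'] for n in ordered[:count]]

    nodes = [{'id': 'n1', 'created_at': 100},
             {'id': 'n2', 'created_at': 50},
             {'id': 'n3', 'created_at': 200}]
    print(nodes_by_age(nodes, 2, True))   # OLDEST_FIRST -> ['n2', 'n1']
    print(nodes_by_age(nodes, 1, False))  # YOUNGEST_FIRST -> ['n3']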
- -from collections import namedtuple -from oslo_config import cfg -from oslo_log import log as logging - -from senlin.common import constraints -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import scaleutils -from senlin.common import schema -from senlin.engine import health_manager -from senlin.policies import base - -LOG = logging.getLogger(__name__) - - -class HealthPolicy(base.Policy): - """Policy for health management of a cluster.""" - - VERSION = '1.2' - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2017.02'}, - {'status': consts.SUPPORTED, 'since': '2018.06'}, - ], - '1.1': [ - {'status': consts.SUPPORTED, 'since': '2018.09'} - ], - '1.2': [ - {'status': consts.SUPPORTED, 'since': '2020.09'} - ], - } - PRIORITY = 600 - - TARGET = [ - ('BEFORE', consts.CLUSTER_RECOVER), - ('BEFORE', consts.CLUSTER_DEL_NODES), - ('BEFORE', consts.CLUSTER_SCALE_IN), - ('BEFORE', consts.CLUSTER_RESIZE), - ('BEFORE', consts.CLUSTER_UPDATE), - ('BEFORE', consts.CLUSTER_REPLACE_NODES), - ('BEFORE', consts.NODE_DELETE), - ('AFTER', consts.CLUSTER_DEL_NODES), - ('AFTER', consts.CLUSTER_SCALE_IN), - ('AFTER', consts.CLUSTER_RESIZE), - ('AFTER', consts.CLUSTER_UPDATE), - ('AFTER', consts.CLUSTER_RECOVER), - ('AFTER', consts.CLUSTER_REPLACE_NODES), - ('AFTER', consts.NODE_DELETE), - ] - - # Should be ANY if profile provides health check support? - PROFILE_TYPE = [ - 'os.nova.server', - 'os.heat.stack', - ] - - KEYS = (DETECTION, RECOVERY) = ('detection', 'recovery') - - _DETECTION_KEYS = ( - DETECTION_MODES, DETECTION_TYPE, DETECTION_OPTIONS, DETECTION_INTERVAL, - NODE_UPDATE_TIMEOUT, RECOVERY_CONDITIONAL - ) = ( - 'detection_modes', 'type', 'options', 'interval', - 'node_update_timeout', 'recovery_conditional' - ) - - _DETECTION_OPTIONS = ( - POLL_URL, POLL_URL_SSL_VERIFY, - POLL_URL_CONN_ERROR_AS_UNHEALTHY, POLL_URL_HEALTHY_RESPONSE, - POLL_URL_RETRY_LIMIT, POLL_URL_RETRY_INTERVAL, - ) = ( - 'poll_url', 'poll_url_ssl_verify', - 'poll_url_conn_error_as_unhealthy', 'poll_url_healthy_response', - 'poll_url_retry_limit', 'poll_url_retry_interval' - ) - - _RECOVERY_KEYS = ( - RECOVERY_ACTIONS, RECOVERY_FENCING, RECOVERY_DELETE_TIMEOUT, - RECOVERY_FORCE_RECREATE, - ) = ( - 'actions', 'fencing', 'node_delete_timeout', 'node_force_recreate', - ) - - FENCING_OPTION_VALUES = ( - COMPUTE, - # STORAGE, NETWORK, - ) = ( - 'COMPUTE', - # 'STORAGE', 'NETWORK' - ) - - ACTION_KEYS = ( - ACTION_NAME, ACTION_PARAMS, - ) = ( - 'name', 'params', - ) - - properties_schema = { - DETECTION: schema.Map( - _('Policy aspect for node failure detection.'), - schema={ - DETECTION_INTERVAL: schema.Integer( - _("Number of seconds between pollings. Only " - "required when type is 'NODE_STATUS_POLLING' or " - "'NODE_STATUS_POLL_URL' or 'HYPERVISOR_STATUS_POLLING'."), - default=60, - ), - NODE_UPDATE_TIMEOUT: schema.Integer( - _("Number of seconds since last node update to " - "wait before checking node health."), - default=300, - ), - RECOVERY_CONDITIONAL: schema.String( - _("The conditional that determines when recovery should be" - " performed in case multiple detection modes are " - "specified. 'ALL_FAILED' means that all " - "detection modes have to return failed health checks " - "before a node is recovered. 
'ANY_FAILED'" - " means that a failed health check with a single " - "detection mode triggers a node recovery."), - constraints=[ - constraints.AllowedValues( - consts.RECOVERY_CONDITIONAL), - ], - default=consts.ANY_FAILED, - required=False, - ), - DETECTION_MODES: schema.List( - _('List of node failure detection modes.'), - schema=schema.Map( - _('Node failure detection mode to try'), - schema={ - DETECTION_TYPE: schema.String( - _('Type of node failure detection.'), - constraints=[ - constraints.AllowedValues( - consts.DETECTION_TYPES), - ], - required=True, - ), - DETECTION_OPTIONS: schema.Map( - schema={ - POLL_URL: schema.String( - _("URL to poll for node status. See " - "documentation for valid expansion " - "parameters. Only required " - "when type is " - "'NODE_STATUS_POLL_URL'."), - default='', - ), - POLL_URL_SSL_VERIFY: schema.Boolean( - _("Whether to verify SSL when calling " - "URL to poll for node status. Only " - "required when type is " - "'NODE_STATUS_POLL_URL'."), - default=True, - ), - POLL_URL_CONN_ERROR_AS_UNHEALTHY: - schema.Boolean( - _("Whether to treat URL connection " - "errors as an indication of an " - "unhealthy node. Only required " - "when type is " - "'NODE_STATUS_POLL_URL'."), - default=True, - ), - POLL_URL_HEALTHY_RESPONSE: schema.String( - _("String pattern in the poll URL " - "response body that indicates a " - "healthy node. Required when type " - "is 'NODE_STATUS_POLL_URL'."), - default='', - ), - POLL_URL_RETRY_LIMIT: schema.Integer( - _("Number of times to retry URL " - "polling when its return body is " - "missing POLL_URL_HEALTHY_RESPONSE " - "string before a node is considered " - "down. Required when type is " - "'NODE_STATUS_POLL_URL'."), - default=3, - ), - POLL_URL_RETRY_INTERVAL: schema.Integer( - _("Number of seconds between URL " - "polling retries before a node is " - "considered down. Required when " - "type is 'NODE_STATUS_POLL_URL'."), - default=3, - ), - }, - default={} - ), - } - ) - ) - }, - required=True, - ), - - RECOVERY: schema.Map( - _('Policy aspect for node failure recovery.'), - schema={ - RECOVERY_ACTIONS: schema.List( - _('List of actions to try for node recovery.'), - schema=schema.Map( - _('Action to try for node recovery.'), - schema={ - ACTION_NAME: schema.String( - _("Name of action to execute."), - constraints=[ - constraints.AllowedValues( - consts.RECOVERY_ACTIONS), - ], - required=True - ), - ACTION_PARAMS: schema.Map( - _("Parameters for the action") - ), - } - ) - ), - RECOVERY_FENCING: schema.List( - _('List of services to be fenced.'), - schema=schema.String( - _('Service to be fenced.'), - constraints=[ - constraints.AllowedValues(FENCING_OPTION_VALUES), - ], - required=True, - ), - ), - RECOVERY_DELETE_TIMEOUT: schema.Integer( - _("Number of seconds to wait for node deletion to " - "finish and start node creation for recreate " - "recovery option. Required when type is " - "'NODE_STATUS_POLL_URL and recovery action " - "is RECREATE'."), - default=20, - ), - RECOVERY_FORCE_RECREATE: schema.Boolean( - _("Whether to create node even if node deletion " - "failed. 
Required when type is " - "'NODE_STATUS_POLL_URL' and recovery " - "action is RECREATE."), - default=False, - ), - }, - required=True, - ), - } - - def __init__(self, name, spec, **kwargs): - super(HealthPolicy, self).__init__(name, spec, **kwargs) - - self.interval = self.properties[self.DETECTION].get( - self.DETECTION_INTERVAL, 60) - - self.node_update_timeout = self.properties[self.DETECTION].get( - self.NODE_UPDATE_TIMEOUT, 300) - - self.recovery_conditional = self.properties[self.DETECTION].get( - self.RECOVERY_CONDITIONAL, consts.ANY_FAILED) - - DetectionMode = namedtuple( - 'DetectionMode', - [self.DETECTION_TYPE] + list(self._DETECTION_OPTIONS)) - - self.detection_modes = [] - - raw_modes = self.properties[self.DETECTION][self.DETECTION_MODES] - for mode in raw_modes: - options = mode[self.DETECTION_OPTIONS] - - self.detection_modes.append( - DetectionMode( - mode[self.DETECTION_TYPE], - options.get(self.POLL_URL, ''), - options.get(self.POLL_URL_SSL_VERIFY, True), - options.get(self.POLL_URL_CONN_ERROR_AS_UNHEALTHY, True), - options.get(self.POLL_URL_HEALTHY_RESPONSE, ''), - options.get(self.POLL_URL_RETRY_LIMIT, ''), - options.get(self.POLL_URL_RETRY_INTERVAL, '') - ) - ) - - recover_settings = self.properties[self.RECOVERY] - self.recover_actions = recover_settings[self.RECOVERY_ACTIONS] - self.fencing_types = recover_settings[self.RECOVERY_FENCING] - self.node_delete_timeout = recover_settings.get( - self.RECOVERY_DELETE_TIMEOUT, None) - self.node_force_recreate = recover_settings.get( - self.RECOVERY_FORCE_RECREATE, False) - - def validate(self, context, validate_props=False): - super(HealthPolicy, self).validate(context, - validate_props=validate_props) - - if len(self.recover_actions) > 1: - message = _("Only one '%s' is supported for now." - ) % self.RECOVERY_ACTIONS - raise exc.ESchema(message=message) - - if self.interval < cfg.CONF.health_check_interval_min: - message = _("Specified interval of %(interval)d seconds has to be " - "larger than health_check_interval_min of " - "%(min_interval)d seconds set in configuration." - ) % {"interval": self.interval, - "min_interval": - cfg.CONF.health_check_interval_min} - raise exc.InvalidSpec(message=message) - - # check valid detection types - polling_types = [consts.NODE_STATUS_POLLING, - consts.NODE_STATUS_POLL_URL, - consts.HYPERVISOR_STATUS_POLLING] - - has_valid_polling_types = all( - d.type in polling_types - for d in self.detection_modes - ) - has_valid_lifecycle_type = ( - len(self.detection_modes) == 1 and - self.detection_modes[0].type == consts.LIFECYCLE_EVENTS - ) - - if not has_valid_polling_types and not has_valid_lifecycle_type: - message = ("Invalid detection modes in health policy: %s" % - ', '.join([d.type for d in self.detection_modes])) - raise exc.InvalidSpec(message=message) - - if len(self.detection_modes) != len(set(self.detection_modes)): - message = ("Duplicate detection modes are not allowed in " - "health policy: %s" % - ', '.join([d.type for d in self.detection_modes])) - raise exc.InvalidSpec(message=message) - - # TODO(Qiming): Add detection of duplicated action names when - # support to list of actions is implemented. - - def attach(self, cluster, enabled=True): - """Hook for policy attach. - - Register the cluster for health management. - - :param cluster: The cluster to which the policy is being attached. - :param enabled: The attached cluster policy is enabled or disabled. - :return: A tuple comprising execution result and policy data. 
- """ - p_type = cluster.rt['profile'].type_name - action_names = [a['name'] for a in self.recover_actions] - if p_type != 'os.nova.server': - if consts.RECOVER_REBUILD in action_names: - err_msg = _("Recovery action REBUILD is only applicable to " - "os.nova.server clusters.") - return False, err_msg - - if consts.RECOVER_REBOOT in action_names: - err_msg = _("Recovery action REBOOT is only applicable to " - "os.nova.server clusters.") - return False, err_msg - - kwargs = { - 'interval': self.interval, - 'node_update_timeout': self.node_update_timeout, - 'params': { - 'recover_action': self.recover_actions, - 'node_delete_timeout': self.node_delete_timeout, - 'node_force_recreate': self.node_force_recreate, - 'recovery_conditional': self.recovery_conditional, - }, - 'enabled': enabled - } - - converted_detection_modes = [ - d._asdict() for d in self.detection_modes - ] - detection_mode = {'detection_modes': converted_detection_modes} - kwargs['params'].update(detection_mode) - - ret = health_manager.register(cluster.id, engine_id=None, **kwargs) - if not ret: - LOG.warning('Registering health manager for cluster %s ' - 'timed out.', cluster.id) - err_msg = _("Registering health manager for cluster timed out.") - return False, err_msg - - data = { - 'interval': self.interval, - 'node_update_timeout': self.node_update_timeout, - 'recovery_conditional': self.recovery_conditional, - 'node_delete_timeout': self.node_delete_timeout, - 'node_force_recreate': self.node_force_recreate, - } - data.update(detection_mode) - - return True, self._build_policy_data(data) - - def detach(self, cluster): - """Hook for policy detach. - - Unregister the cluster for health management. - :param cluster: The target cluster. - :returns: A tuple comprising the execution result and reason. - """ - ret = health_manager.unregister(cluster.id) - if not ret: - LOG.warning('Unregistering health manager for cluster %s ' - 'timed out.', cluster.id) - err_msg = _("Unregistering health manager for cluster timed out.") - return False, err_msg - return True, '' - - def pre_op(self, cluster_id, action, **args): - """Hook before action execution. - - Disable health policy for actions that modify cluster nodes (e.g. - scale in, delete nodes, cluster update, cluster recover and cluster - replace nodes). - For all other actions, set the health policy data in the action data. - - :param cluster_id: The ID of the target cluster. - :param action: The action to be examined. - :param kwargs args: Other keyword arguments to be checked. - :returns: Boolean indicating whether the checking passed. 
- """ - if action.action in (consts.CLUSTER_SCALE_IN, - consts.CLUSTER_DEL_NODES, - consts.NODE_DELETE, - consts.CLUSTER_UPDATE, - consts.CLUSTER_RECOVER, - consts.CLUSTER_REPLACE_NODES): - health_manager.disable(cluster_id) - return True - - if action.action == consts.CLUSTER_RESIZE: - deletion = action.data.get('deletion', None) - if deletion: - health_manager.disable(cluster_id) - return True - - cluster = action.entity - current = len(cluster.nodes) - res, reason = scaleutils.parse_resize_params(action, cluster, - current) - if res == base.CHECK_ERROR: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = reason - return False - - if action.data.get('deletion', None): - health_manager.disable(cluster_id) - return True - - pd = { - 'recover_action': self.recover_actions, - 'fencing': self.fencing_types, - } - action.data.update({'health': pd}) - action.store(action.context) - - return True - - def post_op(self, cluster_id, action, **args): - """Hook before action execution. - - One of the task for this routine is to re-enable health policy if the - action is a request that will shrink the cluster thus the policy has - been temporarily disabled. - - :param cluster_id: The ID of the target cluster. - :param action: The action to be examined. - :param kwargs args: Other keyword arguments to be checked. - :returns: Boolean indicating whether the checking passed. - """ - if action.action in (consts.CLUSTER_SCALE_IN, - consts.CLUSTER_DEL_NODES, - consts.NODE_DELETE, - consts.CLUSTER_UPDATE, - consts.CLUSTER_RECOVER, - consts.CLUSTER_REPLACE_NODES): - health_manager.enable(cluster_id) - return True - - if action.action == consts.CLUSTER_RESIZE: - deletion = action.data.get('deletion', None) - if deletion: - health_manager.enable(cluster_id) - return True - - cluster = action.entity - current = len(cluster.nodes) - res, reason = scaleutils.parse_resize_params(action, cluster, - current) - if res == base.CHECK_ERROR: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = reason - return False - - if action.data.get('deletion', None): - health_manager.enable(cluster_id) - return True - - return True diff --git a/senlin/policies/lb_policy.py b/senlin/policies/lb_policy.py deleted file mode 100644 index 639c75c41..000000000 --- a/senlin/policies/lb_policy.py +++ /dev/null @@ -1,746 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Policy for load-balancing among nodes in a cluster. 
- -NOTE: For full documentation about how the load-balancing policy works, -check: https://docs.openstack.org/senlin/latest/contributor/policies/ -load_balance_v1.html -""" - -from oslo_context import context as oslo_context -from oslo_log import log as logging - -from senlin.common import constraints -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import scaleutils -from senlin.common import schema -from senlin.engine import cluster_policy -from senlin.objects import cluster as co -from senlin.objects import node as no -from senlin.policies import base - -LOG = logging.getLogger(__name__) - - -class LoadBalancingPolicy(base.Policy): - """Policy for load balancing among members of a cluster. - - This policy is expected to be enforced before or after the membership of a - cluster is changed. We need to refresh the load-balancer associated with - the cluster (which could be created by the policy) when these actions are - performed. - """ - VERSION = '1.3' - VERSIONS = { - '1.0': [ - {'status': consts.SUPPORTED, 'since': '2016.04'} - ], - '1.1': [ - {'status': consts.SUPPORTED, 'since': '2018.01'} - ], - '1.2': [ - {'status': consts.SUPPORTED, 'since': '2020.02'} - ], - '1.3': [ - {'status': consts.SUPPORTED, 'since': '2020.03'} - ], - } - - PRIORITY = 500 - - TARGET = [ - ('AFTER', consts.CLUSTER_ADD_NODES), - ('AFTER', consts.CLUSTER_SCALE_OUT), - ('AFTER', consts.CLUSTER_RESIZE), - ('AFTER', consts.NODE_RECOVER), - ('AFTER', consts.NODE_CREATE), - ('AFTER', consts.CLUSTER_REPLACE_NODES), - ('BEFORE', consts.CLUSTER_DEL_NODES), - ('BEFORE', consts.CLUSTER_SCALE_IN), - ('BEFORE', consts.CLUSTER_RESIZE), - ('BEFORE', consts.NODE_DELETE), - ('BEFORE', consts.CLUSTER_REPLACE_NODES), - ] - - PROFILE_TYPE = [ - 'os.nova.server-1.0', - ] - - KEYS = ( - POOL, VIP, HEALTH_MONITOR, LB_STATUS_TIMEOUT, LOADBALANCER, - AVAILABILITY_ZONE, FLAVOR_ID, - ) = ( - 'pool', 'vip', 'health_monitor', 'lb_status_timeout', 'loadbalancer', - 'availability_zone', 'flavor_id', - ) - - _POOL_KEYS = ( - POOL_PROTOCOL, POOL_PROTOCOL_PORT, POOL_SUBNET, - POOL_LB_METHOD, POOL_ADMIN_STATE_UP, POOL_SESSION_PERSISTENCE, POOL_ID, - ) = ( - 'protocol', 'protocol_port', 'subnet', - 'lb_method', 'admin_state_up', 'session_persistence', 'id', - ) - - PROTOCOLS = ( - HTTP, HTTPS, TCP, - ) = ( - 'HTTP', 'HTTPS', 'TCP', - ) - - LB_METHODS = ( - ROUND_ROBIN, LEAST_CONNECTIONS, SOURCE_IP, - ) = ( - 'ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP', - ) - - HEALTH_MONITOR_TYPES = ( - PING, TCP, HTTP, HTTPS, - ) = ( - 'PING', 'TCP', 'HTTP', 'HTTPS', - ) - - HTTP_METHODS = ( - GET, POST, PUT, DELETE, - ) = ( - 'GET', 'POST', 'PUT', 'DELETE', - ) - - _VIP_KEYS = ( - VIP_SUBNET, VIP_NETWORK, VIP_ADDRESS, VIP_CONNECTION_LIMIT, - VIP_PROTOCOL, VIP_PROTOCOL_PORT, VIP_ADMIN_STATE_UP, - ) = ( - 'subnet', 'network', 'address', 'connection_limit', 'protocol', - 'protocol_port', 'admin_state_up', - ) - - HEALTH_MONITOR_KEYS = ( - HM_TYPE, HM_DELAY, HM_TIMEOUT, HM_MAX_RETRIES, HM_ADMIN_STATE_UP, - HM_HTTP_METHOD, HM_URL_PATH, HM_EXPECTED_CODES, HM_ID, - ) = ( - 'type', 'delay', 'timeout', 'max_retries', 'admin_state_up', - 'http_method', 'url_path', 'expected_codes', 'id', - ) - - _SESSION_PERSISTENCE_KEYS = ( - PERSISTENCE_TYPE, COOKIE_NAME, - ) = ( - 'type', 'cookie_name', - ) - - PERSISTENCE_TYPES = ( - PERSIST_SOURCE_IP, PERSIST_HTTP_COOKIE, PERSIST_APP_COOKIE, - PERSIST_NONE, - ) = ( - 'SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE', 'NONE', - ) - - properties_schema = { 
- POOL: schema.Map( - _('LB pool properties.'), - schema={ - POOL_PROTOCOL: schema.String( - _('Protocol used for load balancing.'), - constraints=[ - constraints.AllowedValues(PROTOCOLS), - ], - default=HTTP, - ), - POOL_PROTOCOL_PORT: schema.Integer( - _('Port on which servers are running on the nodes.'), - default=80, - ), - POOL_SUBNET: schema.String( - _('Name or ID of subnet for the port on which nodes can ' - 'be connected.'), - required=True, - ), - POOL_LB_METHOD: schema.String( - _('Load balancing algorithm.'), - constraints=[ - constraints.AllowedValues(LB_METHODS), - ], - default=ROUND_ROBIN, - ), - POOL_ADMIN_STATE_UP: schema.Boolean( - _('Administrative state of the pool.'), - default=True, - ), - POOL_SESSION_PERSISTENCE: schema.Map( - _('Session persistence configuration.'), - schema={ - PERSISTENCE_TYPE: schema.String( - _('Type of session persistence implementation.'), - constraints=[ - constraints.AllowedValues(PERSISTENCE_TYPES), - ], - ), - COOKIE_NAME: schema.String( - _('Name of cookie if type set to APP_COOKIE.'), - ), - }, - default={}, - ), - POOL_ID: schema.String( - _('ID of pool for the cluster on which nodes can ' - 'be connected.'), - default=None, - ), - }, - ), - VIP: schema.Map( - _('VIP address and port of the pool.'), - schema={ - VIP_SUBNET: schema.String( - _('Name or ID of Subnet on which the VIP address will be ' - 'allocated. One of Subnet or Network is required.'), - required=False, - ), - VIP_NETWORK: schema.String( - _('Name or ID of Network on which the VIP address will be ' - 'allocated. One of Subnet or Network is required.'), - required=False, - ), - VIP_ADDRESS: schema.String( - _('IP address of the VIP.'), - default=None, - ), - VIP_CONNECTION_LIMIT: schema.Integer( - _('Maximum number of connections per second allowed for ' - 'this VIP'), - default=-1, - ), - VIP_PROTOCOL: schema.String( - _('Protocol used for VIP.'), - constraints=[ - constraints.AllowedValues(PROTOCOLS), - ], - default=HTTP, - ), - VIP_PROTOCOL_PORT: schema.Integer( - _('TCP port to listen on.'), - default=80, - ), - VIP_ADMIN_STATE_UP: schema.Boolean( - _('Administrative state of the VIP.'), - default=True, - ), - }, - ), - HEALTH_MONITOR: schema.Map( - _('Health monitor for loadbalancer.'), - schema={ - HM_TYPE: schema.String( - _('The type of probe sent by the loadbalancer to verify ' - 'the member state.'), - constraints=[ - constraints.AllowedValues(HEALTH_MONITOR_TYPES), - ], - default=PING, - ), - HM_DELAY: schema.Integer( - _('The amount of time in milliseconds between sending ' - 'probes to members.'), - default=10, - ), - HM_TIMEOUT: schema.Integer( - _('The maximum time in milliseconds that a monitor waits ' - 'to connect before it times out.'), - default=5, - ), - HM_MAX_RETRIES: schema.Integer( - _('The number of allowed connection failures before ' - 'changing the status of the member to INACTIVE.'), - default=3, - ), - HM_ADMIN_STATE_UP: schema.Boolean( - _('Administrative state of the health monitor.'), - default=True, - ), - HM_HTTP_METHOD: schema.String( - _('The HTTP method that the monitor uses for requests.'), - constraints=[ - constraints.AllowedValues(HTTP_METHODS), - ], - ), - HM_URL_PATH: schema.String( - _('The HTTP path of the request sent by the monitor to ' - 'test the health of a member.'), - ), - HM_EXPECTED_CODES: schema.String( - _('Expected HTTP codes for a passing HTTP(S) monitor.'), - ), - HM_ID: schema.String( - _('ID of the health manager for the loadbalancer.'), - default=None, - ), - }, - ), - LB_STATUS_TIMEOUT: schema.Integer( - 
_('Time in second to wait for loadbalancer to become ready ' - 'after senlin requests LBaaS V2 service for operations.'), - default=300, - ), - LOADBALANCER: schema.String( - _('Name or ID of loadbalancer for the cluster on which nodes can ' - 'be connected.'), - default=None, - ), - AVAILABILITY_ZONE: schema.String( - _('Name of the loadbalancer availability zone to use for creation ' - 'of the loadbalancer.'), - default=None, - ), - FLAVOR_ID: schema.String( - _('ID of octavia loadbalancer flavor to use for creation ' - 'of the loadbalancer.'), - default=None, - ) - } - - def __init__(self, name, spec, **kwargs): - super(LoadBalancingPolicy, self).__init__(name, spec, **kwargs) - - self.pool_spec = self.properties.get(self.POOL, {}) - self.vip_spec = self.properties.get(self.VIP, {}) - self.hm_spec = self.properties.get(self.HEALTH_MONITOR, None) - self.az_spec = self.properties.get(self.AVAILABILITY_ZONE, None) - self.flavor_id_spec = self.properties.get(self.FLAVOR_ID, None) - self.lb_status_timeout = self.properties.get(self.LB_STATUS_TIMEOUT) - self.lb = self.properties.get(self.LOADBALANCER, None) - - def validate(self, context, validate_props=False): - super(LoadBalancingPolicy, self).validate(context, validate_props) - - if not validate_props: - return True - - nc = self.network(context.user_id, context.project_id) - oc = self.octavia(context.user_id, context.project_id) - - # validate pool subnet - name_or_id = self.pool_spec.get(self.POOL_SUBNET) - try: - nc.subnet_get(name_or_id) - except exc.InternalError: - msg = _("The specified %(key)s '%(value)s' could not be found." - ) % {'key': self.POOL_SUBNET, 'value': name_or_id} - raise exc.InvalidSpec(message=msg) - - # validate loadbalancer flavor_id - flavor_id = self.flavor_id_spec - if flavor_id: - try: - oc.find_flavor(flavor_id) - except exc.InternalError: - msg = _("The specified %(key)s '%(value)s' could not be found." - ) % {'key': self.FLAVOR_ID, 'value': flavor_id} - raise exc.InvalidSpec(message=msg) - - # validate VIP subnet or network - subnet_name_or_id = self.vip_spec.get(self.VIP_SUBNET) - network_name_or_id = self.vip_spec.get(self.VIP_NETWORK) - if not subnet_name_or_id and not network_name_or_id: - msg = _("At least one of VIP Subnet or Network must be defined.") - raise exc.InvalidSpec(message=msg) - try: - # Check subnet if it is set - obj_type = self.VIP_SUBNET - name_or_id = subnet_name_or_id - if name_or_id: - nc.subnet_get(name_or_id) - - # Check network if it is set - obj_type = self.VIP_NETWORK - name_or_id = network_name_or_id - if name_or_id: - nc.network_get(name_or_id) - - # TODO(rm_work): We *could* do more validation here to catch issues - # at validation time, like verifying the subnet's network_id is the - # same as the id of the network, if both are set -- but for now we - # will just leave that up to the LB API, which means if there is a - # failure, it won't be caught until attach time. - except exc.InternalError: - msg = _("The specified %(key)s '%(value)s' could not be found." - ) % {'key': obj_type, 'value': name_or_id} - raise exc.InvalidSpec(message=msg) - - # validate loadbalancer - if self.lb: - try: - oc.loadbalancer_get(self.lb) - except exc.InternalError: - msg = _("The specified %(key)s '%(value)s' could not be found." - ) % {'key': self.LOADBALANCER, 'value': self.lb} - raise exc.InvalidSpec(message=msg) - - def attach(self, cluster, enabled=True): - """Routine to be invoked when policy is to be attached to a cluster. 
- - :param cluster: The cluster to which the policy is being attached to. - :param enabled: The attached cluster policy is enabled or disabled. - :returns: When the operation was successful, returns a tuple (True, - message); otherwise, return a tuple (False, error). - """ - res, data = super(LoadBalancingPolicy, self).attach(cluster) - if res is False: - return False, data - - lb_driver = self.lbaas(cluster.user, cluster.project) - lb_driver.lb_status_timeout = self.lb_status_timeout - - # Set default name variable senlin cluster name - cluster_name = cluster.name - - # TODO(Anyone): Check if existing nodes has conflicts regarding the - # subnets. Each VM addresses detail has a key named to the network - # which can be used for validation. - if self.lb: - data = {} - data['preexisting'] = True - data['loadbalancer'] = self.lb - data['pool'] = self.pool_spec.get(self.POOL_ID, None) - data['vip_address'] = self.vip_spec.get(self.VIP_ADDRESS, None) - if self.hm_spec and self.hm_spec.get(self.HM_ID, None): - data['healthmonitor'] = self.hm_spec.get(self.HM_ID) - else: - res, data = lb_driver.lb_create(self.vip_spec, self.pool_spec, - cluster_name, - self.hm_spec, self.az_spec, - self.flavor_id_spec) - if res is False: - return False, data - - port = self.pool_spec.get(self.POOL_PROTOCOL_PORT) - subnet = self.pool_spec.get(self.POOL_SUBNET) - - for node in cluster.nodes: - member_id = lb_driver.member_add(node, data['loadbalancer'], - data['pool'], port, subnet) - if member_id is None: - # When failed in adding member, remove all lb resources that - # were created and return the failure reason. - # TODO(anyone): May need to "roll-back" changes caused by any - # successful member_add() calls. - if not self.lb: - lb_driver.lb_delete(**data) - return False, 'Failed in adding node into lb pool' - - node.data.update({'lb_member': member_id}) - values = {'data': node.data} - no.Node.update(oslo_context.get_current(), node.id, values) - - cluster_data_lb = cluster.data.get('loadbalancers', {}) - cluster_data_lb[self.id] = {'vip_address': data.pop('vip_address')} - cluster.data['loadbalancers'] = cluster_data_lb - - policy_data = self._build_policy_data(data) - - return True, policy_data - - def detach(self, cluster): - """Routine to be called when the policy is detached from a cluster. - - :param cluster: The cluster from which the policy is to be detached. - :returns: When the operation was successful, returns a tuple of - (True, data) where the data contains references to the resources - created; otherwise returns a tuple of (False, err) where the err - contains an error message. 
- """ - reason = _('LB resources deletion succeeded.') - lb_driver = self.lbaas(cluster.user, cluster.project) - lb_driver.lb_status_timeout = self.lb_status_timeout - - cp = cluster_policy.ClusterPolicy.load(oslo_context.get_current(), - cluster.id, self.id) - - policy_data = self._extract_policy_data(cp.data) - if policy_data is None: - return True, reason - - is_existed = policy_data.get('preexisting', False) - if not is_existed: - res, reason = lb_driver.lb_delete(**policy_data) - if res is False: - return False, reason - - for node in cluster.nodes: - if 'lb_member' in node.data: - node.data.pop('lb_member') - values = {'data': node.data} - no.Node.update(oslo_context.get_current(), - node.id, values) - else: - # the lb pool is existed, we need to remove servers from it - nodes = cluster.nodes - failed = self._remove_member(oslo_context.get_current(), - [node.id for node in nodes], - cp, lb_driver) - if failed: - return False, _('Failed to remove servers from existed LB.') - - lb_data = cluster.data.get('loadbalancers', {}) - if lb_data and isinstance(lb_data, dict): - lb_data.pop(self.id, None) - if lb_data: - cluster.data['loadbalancers'] = lb_data - else: - cluster.data.pop('loadbalancers') - - return True, reason - - def _get_delete_candidates(self, cluster_id, action): - deletion = action.data.get('deletion', None) - # No deletion field in action.data which means no scaling - # policy or deletion policy is attached. - candidates = None - if deletion is None: - if action.action == consts.NODE_DELETE: - candidates = [action.entity.id] - count = 1 - elif action.action == consts.CLUSTER_DEL_NODES: - # Get candidates from action.input - candidates = action.inputs.get('candidates', []) - count = len(candidates) - elif action.action == consts.CLUSTER_RESIZE: - # Calculate deletion count based on action input - cluster = action.entity - current = len(cluster.nodes) - scaleutils.parse_resize_params(action, cluster, current) - if 'deletion' not in action.data: - return [] - else: - count = action.data['deletion']['count'] - else: # action.action == consts.CLUSTER_SCALE_IN - count = action.inputs.get('count', 1) - elif action.action == consts.CLUSTER_REPLACE_NODES: - candidates = list(action.inputs['candidates'].keys()) - count = len(candidates) - else: - count = deletion.get('count', 0) - candidates = deletion.get('candidates', None) - - # Still no candidates available, pick count of nodes randomly - # apply to CLUSTER_RESIZE/CLUSTER_SCALE_IN - if candidates is None: - if count == 0: - return [] - nodes = action.entity.nodes - if count > len(nodes): - count = len(nodes) - candidates = scaleutils.nodes_by_random(nodes, count) - deletion_data = action.data.get('deletion', {}) - deletion_data.update({ - 'count': len(candidates), - 'candidates': candidates - }) - action.data.update({'deletion': deletion_data}) - - return candidates - - def _remove_member(self, context, candidates, policy, driver, - handle_err=True): - # Load policy data - policy_data = self._extract_policy_data(policy.data) - lb_id = policy_data['loadbalancer'] - pool_id = policy_data['pool'] - - failed_nodes = [] - for node_id in candidates: - node = no.Node.get(context, node_id=node_id) - node_data = node.data or {} - member_id = node_data.get('lb_member', None) - if member_id is None: - LOG.warning('Node %(n)s not found in lb pool %(p)s.', - {'n': node_id, 'p': pool_id}) - continue - - res = driver.member_remove(lb_id, pool_id, member_id) - values = {} - if res is not True and handle_err is True: - 
failed_nodes.append(node.id) - values['status'] = consts.NS_WARNING - values['status_reason'] = _( - 'Failed in removing node from lb pool.') - else: - node.data.pop('lb_member', None) - values['data'] = node.data - no.Node.update(context, node_id, values) - - return failed_nodes - - def _add_member(self, context, candidates, policy, driver): - # Load policy data - policy_data = self._extract_policy_data(policy.data) - lb_id = policy_data['loadbalancer'] - pool_id = policy_data['pool'] - port = self.pool_spec.get(self.POOL_PROTOCOL_PORT) - subnet = self.pool_spec.get(self.POOL_SUBNET) - - failed_nodes = [] - for node_id in candidates: - node = no.Node.get(context, node_id=node_id) - node_data = node.data or {} - member_id = node_data.get('lb_member', None) - if member_id: - LOG.warning('Node %(n)s already in lb pool %(p)s.', - {'n': node_id, 'p': pool_id}) - continue - - member_id = driver.member_add(node, lb_id, pool_id, port, subnet) - values = {} - if member_id is None: - failed_nodes.append(node.id) - values['status'] = consts.NS_WARNING - values['status_reason'] = _( - 'Failed in adding node into lb pool.') - else: - node.data.update({'lb_member': member_id}) - values['data'] = node.data - no.Node.update(context, node_id, values) - - return failed_nodes - - def _get_post_candidates(self, action): - # This method will parse action data passed from action layer - if (action.action == consts.NODE_CREATE or - action.action == consts.NODE_RECOVER): - candidates = [action.entity.id] - elif action.action == consts.CLUSTER_REPLACE_NODES: - candidates = list(action.inputs['candidates'].values()) - else: - creation = action.data.get('creation', None) - candidates = creation.get('nodes', []) if creation else [] - - return candidates - - def _process_recovery(self, candidates, policy, driver, action): - # Process node recovery action - node = action.entity - data = node.data - lb_member = data.get('lb_member', None) - recovery = data.pop('recovery', None) - values = {} - - # lb_member is None, need to add to lb pool - if not lb_member: - values['data'] = data - no.Node.update(action.context, node.id, values) - return candidates - - # was a member of lb pool, check whether has been recreated - if recovery is not None and recovery == consts.RECOVER_RECREATE: - self._remove_member(action.context, candidates, policy, driver, - handle_err=False) - data.pop('lb_member', None) - values['data'] = data - no.Node.update(action.context, node.id, values) - return candidates - - return None - - def pre_op(self, cluster_id, action): - """Routine to be called before an action has been executed. - - For this particular policy, we take this chance to update the pool - maintained by the load-balancer. - - :param cluster_id: The ID of the cluster on which a relevant action - has been executed. - :param action: The action object that triggered this operation. - :returns: Nothing. 
- """ - - cluster = co.Cluster.get(oslo_context.get_current(), cluster_id) - # Skip pre_op if cluster is already at min size - # except in the case of node replacement - if ( - cluster.desired_capacity == cluster.min_size and - action.action not in [consts.CLUSTER_REPLACE_NODES, - consts.CLUSTER_RESIZE] - ): - return - - candidates = self._get_delete_candidates(cluster_id, action) - if len(candidates) == 0: - return - - hooks = action.data.get('hooks', {}) - # if hooks properties are defined, defer removal of nodes from LB - # pool to the pre_op call during DEL_NODE action execution - if hooks: - return - - obj = action.entity - lb_driver = self.lbaas(obj.user, obj.project) - lb_driver.lb_status_timeout = self.lb_status_timeout - cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id, - self.id) - - # Remove nodes that will be deleted from lb pool - failed_nodes = self._remove_member(action.context, candidates, - cp, lb_driver) - - if failed_nodes: - error = _('Failed in removing deleted node(s) from lb pool: %s' - ) % failed_nodes - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = error - - return - - def post_op(self, cluster_id, action): - """Routine to be called after an action has been executed. - - For this particular policy, we take this chance to update the pool - maintained by the load-balancer. - - :param cluster_id: The ID of the cluster on which a relevant action - has been executed. - :param action: The action object that triggered this operation. - :returns: Nothing. - """ - - # skip post op if action did not complete successfully - action_result = action.inputs.get('action_result', 'OK') - if action_result != 'OK': - return - - # TODO(Yanyanhu): Need special handling for cross-az scenario - # which is supported by Neutron lbaas. - candidates = self._get_post_candidates(action) - if not candidates: - return - - obj = action.entity - lb_driver = self.lbaas(obj.user, obj.project) - lb_driver.lb_status_timeout = self.lb_status_timeout - cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id, - self.id) - if action.action == consts.NODE_RECOVER: - candidates = self._process_recovery( - candidates, cp, lb_driver, action) - if not candidates: - return - - # Add new nodes to lb pool - failed_nodes = self._add_member(action.context, candidates, - cp, lb_driver) - if failed_nodes: - error = _('Failed in adding nodes into lb pool: %s') % failed_nodes - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = error diff --git a/senlin/policies/region_placement.py b/senlin/policies/region_placement.py deleted file mode 100644 index 3365c3e5c..000000000 --- a/senlin/policies/region_placement.py +++ /dev/null @@ -1,283 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Policy for scheduling nodes across multiple regions. 
- -NOTE: For full documentation about how the policy works, check: -https://docs.openstack.org/senlin/latest/contributor/policies/region_v1.html -""" - -import math -from oslo_log import log as logging - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import scaleutils -from senlin.common import schema -from senlin.engine import cluster as cm -from senlin.policies import base - -LOG = logging.getLogger(__name__) - - -class RegionPlacementPolicy(base.Policy): - """Policy for placing members of a cluster across multiple regions.""" - - VERSION = '1.0' - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2016.04'}, - {'status': consts.SUPPORTED, 'since': '2016.10'}, - ] - } - - PRIORITY = 200 - - TARGET = [ - ('BEFORE', consts.CLUSTER_SCALE_OUT), - ('BEFORE', consts.CLUSTER_SCALE_IN), - ('BEFORE', consts.CLUSTER_RESIZE), - ('BEFORE', consts.NODE_CREATE), - ] - - PROFILE_TYPE = [ - 'ANY' - ] - - KEYS = ( - REGIONS, - ) = ( - 'regions', - ) - - _AZ_KEYS = ( - REGION_NAME, REGION_WEIGHT, REGION_CAP, - ) = ( - 'name', 'weight', 'cap', - ) - - properties_schema = { - REGIONS: schema.List( - _('List of regions to choose from.'), - schema=schema.Map( - _('An region as a candidate.'), - schema={ - REGION_NAME: schema.String( - _('Name of a region.'), - ), - REGION_WEIGHT: schema.Integer( - _('Weight of the region. The default is 100.'), - default=100, - ), - REGION_CAP: schema.Integer( - _('Maximum number of nodes in this region. The ' - 'default is -1 which means no cap set.'), - default=-1, - ), - }, - ), - ), - } - - def __init__(self, name, spec, **kwargs): - super(RegionPlacementPolicy, self).__init__(name, spec, **kwargs) - - regions = {} - for r in self.properties.get(self.REGIONS): - regions[r[self.REGION_NAME]] = { - 'weight': r[self.REGION_WEIGHT], - 'cap': r[self.REGION_CAP], - } - self.regions = regions - - def validate(self, context, validate_props=False): - super(RegionPlacementPolicy, self).validate(context, validate_props) - - if not validate_props: - return True - - kc = self.keystone(context.user_id, context.project_id) - input_regions = sorted(self.regions.keys()) - valid_regions = kc.validate_regions(input_regions) - invalid_regions = sorted(set(input_regions) - set(valid_regions)) - if invalid_regions: - msg = _("The specified regions '%(value)s' could not be " - "found.") % {'value': invalid_regions} - raise exc.InvalidSpec(message=msg) - - return True - - def _create_plan(self, current, regions, count, expand): - """Compute a placement plan based on the weights of regions. - - :param current: Distribution of existing nodes. - :param regions: Usable regions for node creation. - :param count: Number of nodes to create/delete in this plan. - :param expand: True if the plan is for inflating the cluster, False - otherwise. - - :returns: A list of region names selected for the nodes. 
- """ - # sort candidate regions by distribution and covert it into a list - candidates = sorted(regions.items(), key=lambda x: x[1]['weight'], - reverse=expand) - sum_weight = sum(r['weight'] for r in regions.values()) - if expand: - total = count + sum(current.values()) - else: - total = sum(current.values()) - count - remain = count - plan = dict.fromkeys(regions.keys(), 0) - - for i in range(len(candidates)): - region = candidates[i] - r_name = region[0] - r_weight = region[1]['weight'] - r_cap = region[1]['cap'] - - # maximum number of nodes on current region - q = total * r_weight / float(sum_weight) - if expand: - quota = int(math.ceil(q)) - # respect the cap setting, if any - if r_cap >= 0: - quota = min(quota, r_cap) - headroom = quota - current[r_name] - else: - quota = int(math.floor(q)) - headroom = current[r_name] - quota - - if headroom <= 0: - continue - - if headroom < remain: - plan[r_name] = headroom - remain -= headroom - else: - plan[r_name] = remain if remain > 0 else 0 - remain = 0 - break - - # we have leftovers - if remain > 0: - return None - - result = {} - for reg, count in plan.items(): - if count > 0: - result[reg] = count - - return result - - def _get_count(self, cluster_id, action): - """Get number of nodes to create or delete. - - :param cluster_id: The ID of the target cluster. - :param action: The action object which triggered this policy check. - :return: An integer value which can be 1) positive - number of nodes - to create; 2) negative - number of nodes to delete; 3) 0 - - something wrong happened, and the policy check failed. - """ - if action.action == consts.NODE_CREATE: - # skip node if the context already contains a region_name - profile = action.entity.rt['profile'] - if 'region_name' in profile.properties[profile.CONTEXT]: - return 0 - else: - return 1 - - if action.action == consts.CLUSTER_RESIZE: - if action.data.get('deletion', None): - return -action.data['deletion']['count'] - elif action.data.get('creation', None): - return action.data['creation']['count'] - - cluster = action.entity - curr = len(cluster.nodes) - res = scaleutils.parse_resize_params(action, cluster, curr) - if res[0] == base.CHECK_ERROR: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = res[1] - LOG.error(res[1]) - return 0 - - if action.data.get('deletion', None): - return -action.data['deletion']['count'] - else: - return action.data['creation']['count'] - - if action.action == consts.CLUSTER_SCALE_IN: - pd = action.data.get('deletion', None) - if pd is None: - return -action.inputs.get('count', 1) - else: - return -pd.get('count', 1) - - # CLUSTER_SCALE_OUT: an action that inflates the cluster - pd = action.data.get('creation', None) - if pd is None: - return action.inputs.get('count', 1) - else: - return pd.get('count', 1) - - def pre_op(self, cluster_id, action): - """Callback function when cluster membership is about to change. - - :param cluster_id: ID of the target cluster. - :param action: The action that triggers this policy check. - :returns: ``None``. 
- """ - count = self._get_count(cluster_id, action) - if count == 0: - return - - expand = True - if count < 0: - expand = False - count = -count - - cluster = cm.Cluster.load(action.context, cluster_id) - kc = self.keystone(cluster.user, cluster.project) - - regions_good = kc.validate_regions(self.regions.keys()) - if len(regions_good) == 0: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = _('No region is found usable.') - LOG.error('No region is found usable.') - return - - regions = {} - for r in self.regions.items(): - if r[0] in regions_good: - regions[r[0]] = r[1] - - current_dist = cluster.get_region_distribution(regions_good) - result = self._create_plan(current_dist, regions, count, expand) - if not result: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = _('There is no feasible plan to ' - 'handle all nodes.') - LOG.error('There is no feasible plan to handle all nodes.') - return - - if expand: - if 'creation' not in action.data: - action.data['creation'] = {} - action.data['creation']['count'] = count - action.data['creation']['regions'] = result - else: - if 'deletion' not in action.data: - action.data['deletion'] = {} - action.data['deletion']['count'] = count - action.data['deletion']['regions'] = result diff --git a/senlin/policies/scaling_policy.py b/senlin/policies/scaling_policy.py deleted file mode 100644 index a0a0d57e2..000000000 --- a/senlin/policies/scaling_policy.py +++ /dev/null @@ -1,283 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_utils import timeutils - -from senlin.common import constraints -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import scaleutils as su -from senlin.common import schema -from senlin.common import utils -from senlin.objects import cluster_policy as cpo -from senlin.policies import base - - -CONF = cfg.CONF - - -class ScalingPolicy(base.Policy): - """Policy for changing the size of a cluster. - - This policy is expected to be enforced before the node count of a cluster - is changed. - """ - - VERSION = '1.0' - VERSIONS = { - '1.0': [ - {'status': consts.SUPPORTED, 'since': '2016.04'} - ] - } - - PRIORITY = 100 - - TARGET = [ - ('BEFORE', consts.CLUSTER_SCALE_IN), - ('BEFORE', consts.CLUSTER_SCALE_OUT), - ('AFTER', consts.CLUSTER_SCALE_IN), - ('AFTER', consts.CLUSTER_SCALE_OUT), - ] - - PROFILE_TYPE = [ - 'ANY', - ] - - KEYS = ( - EVENT, ADJUSTMENT, - ) = ( - 'event', 'adjustment', - ) - - _SUPPORTED_EVENTS = ( - CLUSTER_SCALE_IN, CLUSTER_SCALE_OUT, - ) = ( - consts.CLUSTER_SCALE_IN, consts.CLUSTER_SCALE_OUT, - ) - - _ADJUSTMENT_KEYS = ( - ADJUSTMENT_TYPE, ADJUSTMENT_NUMBER, MIN_STEP, BEST_EFFORT, - COOLDOWN, - ) = ( - 'type', 'number', 'min_step', 'best_effort', - 'cooldown', - ) - - properties_schema = { - EVENT: schema.String( - _('Event that will trigger this policy. 
Must be one of ' - 'CLUSTER_SCALE_IN and CLUSTER_SCALE_OUT.'), - constraints=[ - constraints.AllowedValues(_SUPPORTED_EVENTS), - ], - required=True, - ), - ADJUSTMENT: schema.Map( - _('Detailed specification for scaling adjustments.'), - schema={ - ADJUSTMENT_TYPE: schema.String( - _('Type of adjustment when scaling is triggered.'), - constraints=[ - constraints.AllowedValues(consts.ADJUSTMENT_TYPES), - ], - default=consts.CHANGE_IN_CAPACITY, - ), - ADJUSTMENT_NUMBER: schema.Number( - _('A number specifying the amount of adjustment.'), - default=1, - ), - MIN_STEP: schema.Integer( - _('When adjustment type is set to "CHANGE_IN_PERCENTAGE",' - ' this specifies the cluster size will be decreased by ' - 'at least this number of nodes.'), - default=1, - ), - BEST_EFFORT: schema.Boolean( - _('Whether do best effort scaling when new size of ' - 'cluster will break the size limitation'), - default=False, - ), - COOLDOWN: schema.Integer( - _('Number of seconds to hold the cluster for cool-down ' - 'before allowing cluster to be resized again.'), - default=0, - ), - - } - ), - } - - def __init__(self, name, spec, **kwargs): - """Initialize a scaling policy object. - - :param name: Name for the policy object. - :param spec: A dictionary containing the detailed specification for - the policy. - :param dict kwargs: Other optional parameters for policy object - creation. - :return: An object of `ScalingPolicy`. - """ - super(ScalingPolicy, self).__init__(name, spec, **kwargs) - - self.singleton = False - - self.event = self.properties[self.EVENT] - - adjustment = self.properties[self.ADJUSTMENT] - self.adjustment_type = adjustment[self.ADJUSTMENT_TYPE] - self.adjustment_number = adjustment[self.ADJUSTMENT_NUMBER] - self.adjustment_min_step = adjustment[self.MIN_STEP] - - self.best_effort = adjustment[self.BEST_EFFORT] - self.cooldown = adjustment[self.COOLDOWN] - - def validate(self, context, validate_props=False): - super(ScalingPolicy, self).validate(context, validate_props) - - if self.adjustment_number <= 0: - msg = _("the 'number' for 'adjustment' must be > 0") - raise exc.InvalidSpec(message=msg) - - if self.adjustment_min_step < 0: - msg = _("the 'min_step' for 'adjustment' must be >= 0") - raise exc.InvalidSpec(message=msg) - - if self.cooldown < 0: - msg = _("the 'cooldown' for 'adjustment' must be >= 0") - raise exc.InvalidSpec(message=msg) - - def _calculate_adjustment_count(self, current_size): - """Calculate adjustment count based on current_size. - - :param current_size: The current size of the target cluster. - :return: The number of nodes to add or to remove. - """ - - if self.adjustment_type == consts.EXACT_CAPACITY: - if self.event == consts.CLUSTER_SCALE_IN: - count = current_size - self.adjustment_number - else: - count = self.adjustment_number - current_size - elif self.adjustment_type == consts.CHANGE_IN_CAPACITY: - count = self.adjustment_number - else: # consts.CHANGE_IN_PERCENTAGE: - count = int((self.adjustment_number * current_size) / 100.0) - if count < self.adjustment_min_step: - count = self.adjustment_min_step - - return count - - def pre_op(self, cluster_id, action): - """The hook function that is executed before the action. - - The checking result is stored in the ``data`` property of the action - object rather than returned directly from the function. - - :param cluster_id: The ID of the target cluster. - :param action: Action instance against which the policy is being - checked. - :return: None. 
- """ - - # check cooldown - last_op = action.inputs.get('last_op', None) - if last_op and not timeutils.is_older_than(last_op, self.cooldown): - action.data.update({ - 'status': base.CHECK_ERROR, - 'reason': _('Policy %s cooldown is still ' - 'in progress.') % self.id - }) - action.store(action.context) - return - - # Use action input if count is provided - count_value = action.inputs.get('count', None) - cluster = action.entity - current = len(cluster.nodes) - - if count_value is None: - # count not specified, calculate it - count_value = self._calculate_adjustment_count(current) - - # Count must be positive value - success, count = utils.get_positive_int(count_value) - if not success: - action.data.update({ - 'status': base.CHECK_ERROR, - 'reason': _("Invalid count (%(c)s) for action '%(a)s'." - ) % {'c': count_value, 'a': action.action} - }) - action.store(action.context) - return - - # Check size constraints - max_size = cluster.max_size - if max_size == -1: - max_size = cfg.CONF.max_nodes_per_cluster - if action.action == consts.CLUSTER_SCALE_IN: - if self.best_effort: - count = min(count, current - cluster.min_size) - result = su.check_size_params(cluster, current - count, - strict=not self.best_effort) - else: - if self.best_effort: - count = min(count, max_size - current) - result = su.check_size_params(cluster, current + count, - strict=not self.best_effort) - - if result: - # failed validation - pd = { - 'status': base.CHECK_ERROR, - 'reason': result - } - else: - # passed validation - pd = { - 'status': base.CHECK_OK, - 'reason': _('Scaling request validated.'), - } - if action.action == consts.CLUSTER_SCALE_IN: - pd['deletion'] = {'count': count} - else: - pd['creation'] = {'count': count} - - action.data.update(pd) - action.store(action.context) - - return - - def post_op(self, cluster_id, action): - # update last_op for next cooldown check - ts = timeutils.utcnow(True) - cpo.ClusterPolicy.update(action.context, cluster_id, - self.id, {'last_op': ts}) - - def need_check(self, target, action): - # check if target + action matches policy targets - if not super(ScalingPolicy, self).need_check(target, action): - return False - - if target == 'BEFORE': - # Scaling policy BEFORE check should only be triggered if the - # incoming action matches the specific policy event. - # E.g. for scale-out policy the BEFORE check to select nodes for - # termination should only run for scale-out actions. - return self.event == action.action - else: - # Scaling policy AFTER check to reset cooldown timer should be - # triggered for all supported policy events (both scale-in and - # scale-out). E.g. a scale-out policy should reset cooldown timer - # whenever scale-out or scale-in action completes. - return action.action in list(self._SUPPORTED_EVENTS) diff --git a/senlin/policies/zone_placement.py b/senlin/policies/zone_placement.py deleted file mode 100644 index 357de25c7..000000000 --- a/senlin/policies/zone_placement.py +++ /dev/null @@ -1,267 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Policy for scheduling nodes across availability zones. - -NOTE: For full documentation about how the policy works, check: -https://docs.openstack.org/senlin/latest/contributor/policies/zone_v1.html -""" - -import math -import operator - -from oslo_log import log as logging - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import scaleutils -from senlin.common import schema -from senlin.engine import cluster as cm -from senlin.objects import cluster as co -from senlin.objects import node as no -from senlin.policies import base - -LOG = logging.getLogger(__name__) - - -class ZonePlacementPolicy(base.Policy): - """Policy for placing members of a cluster across availability zones.""" - - VERSION = '1.0' - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2016.04'}, - {'status': consts.SUPPORTED, 'since': '2016.10'}, - ] - } - PRIORITY = 300 - - TARGET = [ - ('BEFORE', consts.CLUSTER_SCALE_OUT), - ('BEFORE', consts.CLUSTER_SCALE_IN), - ('BEFORE', consts.CLUSTER_RESIZE), - ('BEFORE', consts.NODE_CREATE), - ] - - PROFILE_TYPE = [ - 'os.nova.server-1.0', - ] - - KEYS = ( - ZONES, - ) = ( - 'zones', - ) - - _AZ_KEYS = ( - ZONE_NAME, ZONE_WEIGHT, - ) = ( - 'name', 'weight', - ) - - properties_schema = { - ZONES: schema.List( - _('List of availability zones to choose from.'), - schema=schema.Map( - _('An availability zone as candidate.'), - schema={ - ZONE_NAME: schema.String( - _('Name of an availability zone.'), - ), - ZONE_WEIGHT: schema.Integer( - _('Weight of the availability zone (default is 100).'), - default=100, - required=False, - ) - }, - ), - ), - } - - def __init__(self, name, spec, **kwargs): - super(ZonePlacementPolicy, self).__init__(name, spec, **kwargs) - - self.zones = dict((z[self.ZONE_NAME], z[self.ZONE_WEIGHT]) - for z in self.properties.get(self.ZONES)) - - def validate(self, context, validate_props=False): - super(ZonePlacementPolicy, self).validate(context, validate_props) - - if not validate_props: - return True - - nc = self.nova(context.user_id, context.project_id) - input_azs = sorted(self.zones.keys()) - valid_azs = nc.validate_azs(input_azs) - invalid_azs = sorted(set(input_azs) - set(valid_azs)) - if invalid_azs: - msg = _("The specified %(key)s '%(value)s' could not be " - "found.") % {'key': self.ZONE_NAME, - 'value': list(invalid_azs)} - raise exc.InvalidSpec(message=msg) - - return True - - def _create_plan(self, current, zones, count, expand): - """Compute a placement plan based on the weights of AZs. - - :param current: Distribution of existing nodes. - :returns: A dict that contains a placement plan. 
- """ - # sort candidate zones by distribution and covert it into a list - candidates = sorted(zones.items(), key=operator.itemgetter(1), - reverse=expand) - - sum_weight = sum(zones.values()) - if expand: - total = count + sum(current.values()) - else: - total = sum(current.values()) - count - - remain = count - plan = dict.fromkeys(zones.keys(), 0) - - for i in range(len(zones)): - zone = candidates[i][0] - weight = candidates[i][1] - q = total * weight / float(sum_weight) - if expand: - quota = int(math.ceil(q)) - headroom = quota - current[zone] - else: - quota = int(math.floor(q)) - headroom = current[zone] - quota - - if headroom <= 0: - continue - - if headroom < remain: - plan[zone] = headroom - remain -= headroom - else: - plan[zone] = remain if remain > 0 else 0 - remain = 0 - break - - if remain > 0: - return None - - # filter out zero values - result = {} - for z, c in plan.items(): - if c > 0: - result[z] = c - - return result - - def _get_count(self, cluster_id, action): - """Get number of nodes to create or delete. - - :param cluster_id: The ID of the target cluster. - :param action: The action object which triggered this policy check. - :return: An integer value which can be 1) positive - number of nodes - to create; 2) negative - number of nodes to delete; 3) 0 - - something wrong happened, and the policy check failed. - """ - if action.action == consts.NODE_CREATE: - # skip the policy if availability zone is specified in profile - profile = action.entity.rt['profile'] - if profile.properties[profile.AVAILABILITY_ZONE]: - return 0 - return 1 - - if action.action == consts.CLUSTER_RESIZE: - if action.data.get('deletion', None): - return -action.data['deletion']['count'] - elif action.data.get('creation', None): - return action.data['creation']['count'] - - db_cluster = co.Cluster.get(action.context, cluster_id) - current = no.Node.count_by_cluster(action.context, cluster_id) - res = scaleutils.parse_resize_params(action, db_cluster, current) - if res[0] == base.CHECK_ERROR: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = res[1] - LOG.error(res[1]) - return 0 - - if action.data.get('deletion', None): - return -action.data['deletion']['count'] - else: - return action.data['creation']['count'] - - if action.action == consts.CLUSTER_SCALE_IN: - pd = action.data.get('deletion', None) - if pd is None: - return -action.inputs.get('count', 1) - else: - return -pd.get('count', 1) - - # CLUSTER_SCALE_OUT: an action that inflates the cluster - pd = action.data.get('creation', None) - if pd is None: - return action.inputs.get('count', 1) - else: - return pd.get('count', 1) - - def pre_op(self, cluster_id, action): - """Callback function when cluster membership is about to change. - - :param cluster_id: ID of the target cluster. - :param action: The action that triggers this policy check. 
- """ - count = self._get_count(cluster_id, action) - if count == 0: - return - - expand = True - if count < 0: - expand = False - count = -count - - cluster = cm.Cluster.load(action.context, cluster_id) - - nc = self.nova(cluster.user, cluster.project) - zones_good = nc.validate_azs(self.zones.keys()) - if len(zones_good) == 0: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = _('No availability zone found available.') - LOG.error('No availability zone found available.') - return - - zones = {} - for z, w in self.zones.items(): - if z in zones_good: - zones[z] = w - - current = cluster.get_zone_distribution(action.context, zones.keys()) - result = self._create_plan(current, zones, count, expand) - - if not result: - action.data['status'] = base.CHECK_ERROR - action.data['reason'] = _('There is no feasible plan to ' - 'handle all nodes.') - LOG.error('There is no feasible plan to handle all nodes.') - return - - if expand: - if 'creation' not in action.data: - action.data['creation'] = {} - action.data['creation']['count'] = count - action.data['creation']['zones'] = result - else: - if 'deletion' not in action.data: - action.data['deletion'] = {} - action.data['deletion']['count'] = count - action.data['deletion']['zones'] = result diff --git a/senlin/profiles/__init__.py b/senlin/profiles/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/profiles/base.py b/senlin/profiles/base.py deleted file mode 100644 index 2e534f766..000000000 --- a/senlin/profiles/base.py +++ /dev/null @@ -1,596 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import eventlet -import inspect - -from oslo_config import cfg -from oslo_context import context as oslo_context -from oslo_log import log as logging -from oslo_utils import timeutils -from osprofiler import profiler - -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.common import utils -from senlin.drivers import base as driver_base -from senlin.engine import environment -from senlin.objects import credential as co -from senlin.objects import profile as po - -LOG = logging.getLogger(__name__) - - -class Profile(object): - """Base class for profiles.""" - - VERSIONS = {} - - KEYS = ( - TYPE, VERSION, PROPERTIES, - ) = ( - 'type', 'version', 'properties', - ) - - spec_schema = { - TYPE: schema.String( - _('Name of the profile type.'), - required=True, - ), - VERSION: schema.String( - _('Version number of the profile type.'), - required=True, - ), - PROPERTIES: schema.Map( - _('Properties for the profile.'), - required=True, - ) - } - - properties_schema = {} - OPERATIONS = {} - - def __new__(cls, name, spec, **kwargs): - """Create a new profile of the appropriate class. - - :param name: The name for the profile. - :param spec: A dictionary containing the spec for the profile. - :param kwargs: Keyword arguments for profile creation. 
- :returns: An instance of a specific sub-class of Profile. - """ - type_name, version = schema.get_spec_version(spec) - type_str = "-".join([type_name, version]) - - if cls != Profile: - ProfileClass = cls - else: - ProfileClass = environment.global_env().get_profile(type_str) - - return super(Profile, cls).__new__(ProfileClass) - - def __init__(self, name, spec, **kwargs): - """Initialize a profile instance. - - :param name: A string that specifies the name for the profile. - :param spec: A dictionary containing the detailed profile spec. - :param kwargs: Keyword arguments for initializing the profile. - :returns: An instance of a specific sub-class of Profile. - """ - - type_name, version = schema.get_spec_version(spec) - self.type_name = type_name - self.version = version - type_str = "-".join([type_name, version]) - - self.name = name - self.spec = spec - - self.id = kwargs.get('id', None) - self.type = kwargs.get('type', type_str) - - self.user = kwargs.get('user') - self.project = kwargs.get('project') - self.domain = kwargs.get('domain') - - self.metadata = kwargs.get('metadata', {}) - - self.created_at = kwargs.get('created_at', None) - self.updated_at = kwargs.get('updated_at', None) - - self.spec_data = schema.Spec(self.spec_schema, self.spec) - self.properties = schema.Spec( - self.properties_schema, - self.spec.get(self.PROPERTIES, {}), - version) - - if not self.id: - # new object needs a context dict - self.context = self._init_context() - else: - self.context = kwargs.get('context') - - # initialize clients - self._computeclient = None - self._networkclient = None - self._orchestrationclient = None - self._workflowclient = None - self._block_storageclient = None - self._glanceclient = None - - @classmethod - def _from_object(cls, profile): - """Construct a profile from profile object. - - :param profile: a profile object that contains all required fields. - """ - kwargs = { - 'id': profile.id, - 'type': profile.type, - 'context': profile.context, - 'user': profile.user, - 'project': profile.project, - 'domain': profile.domain, - 'metadata': profile.metadata, - 'created_at': profile.created_at, - 'updated_at': profile.updated_at, - } - - return cls(profile.name, profile.spec, **kwargs) - - @classmethod - def load(cls, ctx, profile=None, profile_id=None, project_safe=True): - """Retrieve a profile object from database.""" - if profile is None: - profile = po.Profile.get(ctx, profile_id, - project_safe=project_safe) - if profile is None: - raise exc.ResourceNotFound(type='profile', id=profile_id) - - return cls._from_object(profile) - - @classmethod - def create(cls, ctx, name, spec, metadata=None): - """Create a profile object and validate it. - - :param ctx: The requesting context. - :param name: The name for the profile object. - :param spec: A dict containing the detailed spec. - :param metadata: An optional dictionary specifying key-value pairs to - be associated with the profile. - :returns: An instance of Profile. 
- """ - if metadata is None: - metadata = {} - - try: - profile = cls(name, spec, metadata=metadata, user=ctx.user_id, - project=ctx.project_id) - profile.validate(True) - except (exc.ResourceNotFound, exc.ESchema) as ex: - error = _("Failed in creating profile %(name)s: %(error)s" - ) % {"name": name, "error": str(ex)} - raise exc.InvalidSpec(message=error) - - profile.store(ctx) - - return profile - - @classmethod - def delete(cls, ctx, profile_id): - po.Profile.delete(ctx, profile_id) - - def store(self, ctx): - """Store the profile into database and return its ID.""" - timestamp = timeutils.utcnow(True) - - values = { - 'name': self.name, - 'type': self.type, - 'context': self.context, - 'spec': self.spec, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'meta_data': self.metadata, - } - - if self.id: - self.updated_at = timestamp - values['updated_at'] = timestamp - po.Profile.update(ctx, self.id, values) - else: - self.created_at = timestamp - values['created_at'] = timestamp - profile = po.Profile.create(ctx, values) - self.id = profile.id - - return self.id - - @classmethod - @profiler.trace('Profile.create_object', hide_args=False) - def create_object(cls, ctx, obj): - profile = cls.load(ctx, profile_id=obj.profile_id) - return profile.do_create(obj) - - @classmethod - @profiler.trace('Profile.create_cluster_object', hide_args=False) - def create_cluster_object(cls, ctx, obj): - profile = cls.load(ctx, profile_id=obj.profile_id) - try: - ret = profile.do_cluster_create(obj) - except NotImplementedError: - return None - return ret - - @classmethod - @profiler.trace('Profile.delete_object', hide_args=False) - def delete_object(cls, ctx, obj, **params): - profile = cls.load(ctx, profile_id=obj.profile_id) - return profile.do_delete(obj, **params) - - @classmethod - @profiler.trace('Profile.delete_cluster_object', hide_args=False) - def delete_cluster_object(cls, ctx, obj, **params): - profile = cls.load(ctx, profile_id=obj.profile_id) - try: - ret = profile.do_cluster_delete(obj, **params) - except NotImplementedError: - return None - return ret - - @classmethod - @profiler.trace('Profile.update_object', hide_args=False) - def update_object(cls, ctx, obj, new_profile_id=None, **params): - profile = cls.load(ctx, profile_id=obj.profile_id) - new_profile = None - if new_profile_id: - new_profile = cls.load(ctx, profile_id=new_profile_id) - return profile.do_update(obj, new_profile, **params) - - @classmethod - @profiler.trace('Profile.get_details', hide_args=False) - def get_details(cls, ctx, obj): - profile = cls.load(ctx, profile_id=obj.profile_id) - return profile.do_get_details(obj) - - @classmethod - @profiler.trace('Profile.adopt_node', hide_args=False) - def adopt_node(cls, ctx, obj, type_name, overrides=None, snapshot=False): - """Adopt a node. - - :param ctx: Request context. - :param obj: A temporary node object. - :param overrides: An optional parameter that specifies the set of - properties to be overridden. - :param snapshot: A boolean flag indicating whether a snapshot should - be created before adopting the node. - :returns: A dictionary containing the profile spec created from the - specific node, or a dictionary containing error message. 
- """ - parts = type_name.split("-") - tmpspec = {"type": parts[0], "version": parts[1]} - profile = cls("name", tmpspec) - return profile.do_adopt(obj, overrides=overrides, snapshot=snapshot) - - @classmethod - @profiler.trace('Profile.join_cluster', hide_args=False) - def join_cluster(cls, ctx, obj, cluster_id): - profile = cls.load(ctx, profile_id=obj.profile_id) - return profile.do_join(obj, cluster_id) - - @classmethod - @profiler.trace('Profile.leave_cluster', hide_args=False) - def leave_cluster(cls, ctx, obj): - profile = cls.load(ctx, profile_id=obj.profile_id) - return profile.do_leave(obj) - - @classmethod - @profiler.trace('Profile.check_object', hide_args=False) - def check_object(cls, ctx, obj): - profile = cls.load(ctx, profile_id=obj.profile_id) - return profile.do_check(obj) - - @classmethod - @profiler.trace('Profile.check_object', hide_args=False) - def healthcheck_object(cls, ctx, obj, health_check_type): - profile = cls.load(ctx, profile_id=obj.profile_id) - return profile.do_healthcheck(obj, health_check_type) - - @classmethod - @profiler.trace('Profile.recover_object', hide_args=False) - def recover_object(cls, ctx, obj, **options): - profile = cls.load(ctx, profile_id=obj.profile_id) - return profile.do_recover(obj, **options) - - def validate(self, validate_props=False): - """Validate the schema and the data provided.""" - # general validation - self.spec_data.validate() - self.properties.validate() - - ctx_dict = self.properties.get('context', {}) - if ctx_dict: - argspec = inspect.getfullargspec(context.RequestContext.__init__) - valid_keys = argspec.args - bad_keys = [k for k in ctx_dict if k not in valid_keys] - if bad_keys: - msg = _("Some keys in 'context' are invalid: %s") % bad_keys - raise exc.ESchema(message=msg) - - if validate_props: - self.do_validate(obj=self) - - @classmethod - def get_schema(cls): - return dict((name, dict(schema)) - for name, schema in cls.properties_schema.items()) - - @classmethod - def get_ops(cls): - return dict((name, dict(schema)) - for name, schema in cls.OPERATIONS.items()) - - def _init_context(self): - profile_context = {} - if self.CONTEXT in self.properties: - profile_context = self.properties[self.CONTEXT] or {} - - ctx_dict = context.get_service_credentials(**profile_context) - - ctx_dict.pop('project_name', None) - ctx_dict.pop('project_domain_name', None) - - return ctx_dict - - def _build_conn_params(self, user, project): - """Build connection params for specific user and project. - - :param user: The ID of the user for which a trust will be used. - :param project: The ID of the project for which a trust will be used. - :returns: A dict containing the required parameters for connection - creation. - """ - cred = co.Credential.get(oslo_context.get_current(), user, project) - if cred is None: - raise exc.TrustNotFound(trustor=user) - - trust_id = cred.cred['openstack']['trust'] - - # This is supposed to be trust-based authentication - params = copy.deepcopy(self.context) - params['trust_id'] = trust_id - - return params - - def compute(self, obj): - """Construct compute client based on object. - - :param obj: Object for which the client is created. It is expected to - be None when retrieving an existing client. When creating - a client, it contains the user and project to be used. 
- """ - - if self._computeclient is not None: - return self._computeclient - params = self._build_conn_params(obj.user, obj.project) - self._computeclient = driver_base.SenlinDriver().compute(params) - return self._computeclient - - def glance(self, obj): - """Construct glance client based on object. - - :param obj: Object for which the client is created. It is expected to - be None when retrieving an existing client. When creating - a client, it contains the user and project to be used. - """ - if self._glanceclient is not None: - return self._glanceclient - params = self._build_conn_params(obj.user, obj.project) - self._glanceclient = driver_base.SenlinDriver().glance(params) - return self._glanceclient - - def network(self, obj): - """Construct network client based on object. - - :param obj: Object for which the client is created. It is expected to - be None when retrieving an existing client. When creating - a client, it contains the user and project to be used. - """ - if self._networkclient is not None: - return self._networkclient - params = self._build_conn_params(obj.user, obj.project) - self._networkclient = driver_base.SenlinDriver().network(params) - return self._networkclient - - def orchestration(self, obj): - """Construct orchestration client based on object. - - :param obj: Object for which the client is created. It is expected to - be None when retrieving an existing client. When creating - a client, it contains the user and project to be used. - """ - if self._orchestrationclient is not None: - return self._orchestrationclient - params = self._build_conn_params(obj.user, obj.project) - oc = driver_base.SenlinDriver().orchestration(params) - self._orchestrationclient = oc - return oc - - def workflow(self, obj): - if self._workflowclient is not None: - return self._workflowclient - params = self._build_conn_params(obj.user, obj.project) - self._workflowclient = driver_base.SenlinDriver().workflow(params) - return self._workflowclient - - def block_storage(self, obj): - """Construct cinder client based on object. - - :param obj: Object for which the client is created. It is expected to - be None when retrieving an existing client. When creating - a client, it contains the user and project to be used. - """ - if self._block_storageclient is not None: - return self._block_storageclient - params = self._build_conn_params(obj.user, obj.project) - self._block_storageclient = driver_base.SenlinDriver().block_storage( - params) - return self._block_storageclient - - def do_create(self, obj): - """For subclass to override.""" - raise NotImplementedError - - def do_cluster_create(self, obj): - """For subclass to override.""" - raise NotImplementedError - - def do_delete(self, obj, **params): - """For subclass to override.""" - raise NotImplementedError - - def do_cluster_delete(self, obj): - """For subclass to override.""" - raise NotImplementedError - - def do_update(self, obj, new_profile, **params): - """For subclass to override.""" - LOG.warning("Update operation not supported.") - return True - - def do_check(self, obj): - """For subclass to override.""" - LOG.warning("Check operation not supported.") - return True - - def do_healthcheck(self, obj): - """Default healthcheck operation. - - This is provided as a fallback if a specific profile type does not - override this method. - - :param obj: The node object to operate on. - :return status: True indicates node is healthy, False indicates - it is unhealthy. 
- """ - return self.do_check(obj) - - def do_get_details(self, obj): - """For subclass to override.""" - LOG.warning("Get_details operation not supported.") - return {} - - def do_adopt(self, obj, overrides=None, snapshot=False): - """For subclass to override.""" - LOG.warning("Adopt operation not supported.") - return {} - - def do_join(self, obj, cluster_id): - """For subclass to override to perform extra operations.""" - LOG.warning("Join operation not specialized.") - return True - - def do_leave(self, obj): - """For subclass to override to perform extra operations.""" - LOG.warning("Leave operation not specialized.") - return True - - def do_recover(self, obj, **options): - """Default recover operation. - - This is provided as a fallback if a specific profile type does not - override this method. - - :param obj: The node object to operate on. - :param options: Keyword arguments for the recover operation. - :return id: New id of the recovered resource or None if recovery - failed. - :return status: True indicates successful recovery, False indicates - failure. - """ - operation = options.get('operation', None) - force_recreate = options.get('force_recreate', None) - delete_timeout = options.get('delete_timeout', None) - - if operation.upper() != consts.RECOVER_RECREATE: - LOG.error("Recover operation not supported: %s", operation) - return None, False - - extra_params = options.get('operation_params', None) - fence_compute = False - if extra_params: - fence_compute = extra_params.get('fence_compute', False) - - try: - self.do_delete(obj, force=fence_compute, timeout=delete_timeout) - except exc.EResourceDeletion as ex: - if force_recreate: - # log error and continue on to creating the node - LOG.warning('Failed to delete node during recovery action: %s', - ex) - else: - raise exc.EResourceOperation(op='recovering', type='node', - id=obj.id, - message=str(ex)) - - # pause to allow deleted resource to get reclaimed by nova - # this is needed to avoid a problem when the compute resources are - # at their quota limit. The deleted resource has to become available - # so that the new node can be created. 
- eventlet.sleep(cfg.CONF.batch_interval) - - res = None - try: - res = self.do_create(obj) - except exc.EResourceCreation as ex: - raise exc.EResourceOperation(op='recovering', type='node', - id=obj.id, message=str(ex), - resource_id=ex.resource_id) - return res, True - - def do_validate(self, obj): - """For subclass to override.""" - LOG.warning("Validate operation not supported.") - return True - - def to_dict(self): - pb_dict = { - 'id': self.id, - 'name': self.name, - 'type': self.type, - 'user': self.user, - 'project': self.project, - 'domain': self.domain, - 'spec': self.spec, - 'metadata': self.metadata, - 'created_at': utils.isotime(self.created_at), - 'updated_at': utils.isotime(self.updated_at), - } - return pb_dict - - def validate_for_update(self, new_profile): - non_updatables = [] - for (k, v) in new_profile.properties.items(): - if self.properties.get(k, None) != v: - if not self.properties_schema[k].updatable: - non_updatables.append(k) - - if not non_updatables: - return True - - msg = ", ".join(non_updatables) - LOG.error("The following properties are not updatable: %s.", msg) - return False diff --git a/senlin/profiles/container/__init__.py b/senlin/profiles/container/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/profiles/container/docker.py b/senlin/profiles/container/docker.py deleted file mode 100644 index f52c47a6f..000000000 --- a/senlin/profiles/container/docker.py +++ /dev/null @@ -1,451 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
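[Editor's note: the Profile base class removed above dispatches construction through __new__: the spec's "type" and "version" select a concrete subclass from the global environment, so callers always instantiate the base class. A minimal sketch of that pattern, with _REGISTRY as a hypothetical stand-in for environment.global_env():]

# Sketch, not part of the removed tree: registry-based __new__ dispatch.
_REGISTRY = {}  # stand-in for environment.global_env()

class Base:
    def __new__(cls, name, spec):
        # "type-version" keys the registry, as in Profile.__new__ above.
        key = "%s-%s" % (spec["type"], spec["version"])
        impl = _REGISTRY[key] if cls is Base else cls
        return super().__new__(impl)

class NovaServer(Base):
    pass

_REGISTRY["os.nova.server-1.0"] = NovaServer

obj = Base("n1", {"type": "os.nova.server", "version": "1.0"})
print(type(obj).__name__)  # NovaServer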
- -import random - -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.common import utils -from senlin.db.sqlalchemy import api as db_api -from senlin.drivers.container import docker_v1 as docker_driver -from senlin.engine import cluster -from senlin.engine import node as node_mod -from senlin.objects import cluster as co -from senlin.objects import node as no -from senlin.profiles import base - - -class DockerProfile(base.Profile): - """Profile for a docker container.""" - VERSIONS = { - '1.0': [ - {'status': consts.EXPERIMENTAL, 'since': '2017.02'} - ] - } - - _VALID_HOST_TYPES = [ - HOST_NOVA_SERVER, HOST_HEAT_STACK, - ] = [ - "os.nova.server", "os.heat.stack", - ] - - KEYS = ( - CONTEXT, IMAGE, NAME, COMMAND, HOST_NODE, HOST_CLUSTER, PORT, - ) = ( - 'context', 'image', 'name', 'command', 'host_node', 'host_cluster', - 'port', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('Customized security context for operating containers.') - ), - IMAGE: schema.String( - _('The image used to create a container'), - required=True, - ), - NAME: schema.String( - _('The name of the container.'), - updatable=True, - ), - COMMAND: schema.String( - _('The command to run when container is started.') - ), - PORT: schema.Integer( - _('The port number used to connect to docker daemon.'), - default=2375 - ), - HOST_NODE: schema.String( - _('The node on which container will be launched.') - ), - HOST_CLUSTER: schema.String( - _('The cluster on which container will be launched.') - ), - } - - OP_NAMES = ( - OP_RESTART, OP_PAUSE, OP_UNPAUSE, - ) = ( - 'restart', 'pause', 'unpause', - ) - - _RESTART_WAIT = (RESTART_WAIT) = ('wait_time') - - OPERATIONS = { - OP_RESTART: schema.Operation( - _("Restart a container."), - schema={ - RESTART_WAIT: schema.IntegerParam( - _("Number of seconds to wait before killing the " - "container.") - ) - } - ), - OP_PAUSE: schema.Operation( - _("Pause a container.") - ), - OP_UNPAUSE: schema.Operation( - _("Unpause a container.") - ) - } - - def __init__(self, type_name, name, **kwargs): - super(DockerProfile, self).__init__(type_name, name, **kwargs) - - self._dockerclient = None - self.container_id = None - self.host = None - self.cluster = None - - @classmethod - def create(cls, ctx, name, spec, metadata=None): - profile = super(DockerProfile, cls).create(ctx, name, spec, metadata) - - host_cluster = profile.properties.get(profile.HOST_CLUSTER, None) - if host_cluster: - db_api.cluster_add_dependents(ctx, host_cluster, profile.id) - - host_node = profile.properties.get(profile.HOST_NODE, None) - if host_node: - db_api.node_add_dependents(ctx, host_node, profile.id, 'profile') - - return profile - - @classmethod - def delete(cls, ctx, profile_id): - obj = cls.load(ctx, profile_id=profile_id) - cluster_id = obj.properties.get(obj.HOST_CLUSTER, None) - if cluster_id: - db_api.cluster_remove_dependents(ctx, cluster_id, profile_id) - - node_id = obj.properties.get(obj.HOST_NODE, None) - if node_id: - db_api.node_remove_dependents(ctx, node_id, profile_id, 'profile') - - super(DockerProfile, cls).delete(ctx, profile_id) - - def docker(self, obj): - """Construct docker client based on object. - - :param obj: Object for which the client is created. It is expected to - be None when retrieving an existing client. When creating - a client, it contains the user and project to be used. 
- """ - if self._dockerclient is not None: - return self._dockerclient - - host_node = self.properties.get(self.HOST_NODE, None) - host_cluster = self.properties.get(self.HOST_CLUSTER, None) - ctx = context.get_admin_context() - self.host = self._get_host(ctx, host_node, host_cluster) - - # TODO(Anyone): Check node.data for per-node host selection - host_type = self.host.rt['profile'].type_name - if host_type not in self._VALID_HOST_TYPES: - msg = _("Type of host node (%s) is not supported") % host_type - raise exc.InternalError(message=msg) - - host_ip = self._get_host_ip(obj, self.host.physical_id, host_type) - if host_ip is None: - msg = _("Unable to determine the IP address of host node") - raise exc.InternalError(message=msg) - - url = 'tcp://%(ip)s:%(port)d' % {'ip': host_ip, - 'port': self.properties[self.PORT]} - self._dockerclient = docker_driver.DockerClient(url) - return self._dockerclient - - def _get_host(self, ctx, host_node, host_cluster): - """Determine which node to launch container on. - - :param ctx: An instance of the request context. - :param host_node: The uuid of the hosting node. - :param host_cluster: The uuid of the hosting cluster. - """ - host = None - if host_node is not None: - try: - host = node_mod.Node.load(ctx, node_id=host_node) - except exc.ResourceNotFound as ex: - msg = ex.enhance_msg('host', ex) - raise exc.InternalError(message=msg) - return host - - if host_cluster is not None: - host = self._get_random_node(ctx, host_cluster) - - return host - - def _get_random_node(self, ctx, host_cluster): - """Get a node randomly from the host cluster. - - :param ctx: An instance of the request context. - :param host_cluster: The uuid of the hosting cluster. - """ - self.cluster = None - try: - self.cluster = cluster.Cluster.load(ctx, cluster_id=host_cluster) - except exc.ResourceNotFound as ex: - msg = ex.enhance_msg('host', ex) - raise exc.InternalError(message=msg) - - filters = {consts.NODE_STATUS: consts.NS_ACTIVE} - nodes = no.Node.get_all_by_cluster(ctx, cluster_id=host_cluster, - filters=filters) - if len(nodes) == 0: - msg = _("The cluster (%s) contains no active nodes") % host_cluster - raise exc.InternalError(message=msg) - - # TODO(anyone): Should pick a node by its load - db_node = nodes[random.randrange(len(nodes))] - return node_mod.Node.load(ctx, db_node=db_node) - - def _get_host_ip(self, obj, host_node, host_type): - """Fetch the ip address of physical node. - - :param obj: The node object representing the container instance. - :param host_node: The name or ID of the hosting node object. - :param host_type: The type of the hosting node, which can be either a - nova server or a heat stack. - :returns: The fixed IP address of the hosting node. - """ - host_ip = None - if host_type == self.HOST_NOVA_SERVER: - server = self.compute(obj).server_get(host_node) - private_addrs = server.addresses['private'] - for addr in private_addrs: - if addr['version'] == 4 and addr['OS-EXT-IPS:type'] == 'fixed': - host_ip = addr['addr'] - elif host_type == self.HOST_HEAT_STACK: - stack = self.orchestration(obj).stack_get(host_node) - outputs = stack.outputs or {} - if outputs: - for output in outputs: - if output['output_key'] == 'fixed_ip': - host_ip = output['output_value'] - break - - if not outputs or host_ip is None: - msg = _("Output 'fixed_ip' is missing from the provided stack" - " node") - raise exc.InternalError(message=msg) - - return host_ip - - def do_validate(self, obj): - """Validate if the spec has provided valid configuration. 
- - :param obj: The node object. - """ - cluster = self.properties[self.HOST_CLUSTER] - node = self.properties[self.HOST_NODE] - if all([cluster, node]): - msg = _("Either '%(c)s' or '%(n)s' must be specified, but not " - "both.") % {'c': self.HOST_CLUSTER, 'n': self.HOST_NODE} - raise exc.InvalidSpec(message=msg) - - if not any([cluster, node]): - msg = _("Either '%(c)s' or '%(n)s' must be specified." - ) % {'c': self.HOST_CLUSTER, 'n': self.HOST_NODE} - raise exc.InvalidSpec(message=msg) - - if cluster: - try: - co.Cluster.find(self.context, cluster) - except (exc.ResourceNotFound, exc.MultipleChoices): - msg = _("The specified %(key)s '%(val)s' could not be found " - "or is not unique." - ) % {'key': self.HOST_CLUSTER, 'val': cluster} - raise exc.InvalidSpec(message=msg) - - if node: - try: - no.Node.find(self.context, node) - except (exc.ResourceNotFound, exc.MultipleChoices): - msg = _("The specified %(key)s '%(val)s' could not be found " - "or is not unique." - ) % {'key': self.HOST_NODE, 'val': node} - raise exc.InvalidSpec(message=msg) - - def do_create(self, obj): - """Create a container instance using the given profile. - - :param obj: The node object for this container. - :returns: ID of the container instance or ``None`` if driver fails. - :raises: `EResourceCreation` - """ - name = self.properties[self.NAME] - if name is None: - name = '-'.join([obj.name, utils.random_name()]) - - params = { - 'image': self.properties[self.IMAGE], - 'name': name, - 'command': self.properties[self.COMMAND], - } - - try: - ctx = context.get_service_context(project=obj.project, - user=obj.user) - dockerclient = self.docker(obj) - db_api.node_add_dependents(ctx, self.host.id, obj.id) - container = dockerclient.container_create(**params) - dockerclient.start(container['Id']) - except exc.InternalError as ex: - raise exc.EResourceCreation(type='container', - message=str(ex)) - - self.container_id = container['Id'][:36] - return self.container_id - - def do_delete(self, obj): - """Delete a container node. - - :param obj: The node object representing the container. - :returns: `None` - """ - if not obj.physical_id: - return - - try: - self.handle_stop(obj) - self.docker(obj).container_delete(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='container', - id=obj.physical_id, - message=str(ex)) - ctx = context.get_admin_context() - db_api.node_remove_dependents(ctx, self.host.id, obj.id) - return - - def do_update(self, obj, new_profile=None, **params): - """Perform update on the container. - - :param obj: the container to operate on - :param new_profile: the new profile for the container. - :param params: a dictionary of optional parameters. - :returns: True if update was successful or False otherwise. - :raises: `EResourceUpdate` if operation fails. - """ - self.server_id = obj.physical_id - if not self.server_id: - return False - - if not new_profile: - return False - - if not self.validate_for_update(new_profile): - return False - - name_changed, new_name = self._check_container_name(obj, new_profile) - if name_changed: - self._update_name(obj, new_name) - - return True - - def _check_container_name(self, obj, profile): - """Check if there is a new name to be assigned to the container. - - :param obj: The node object to operate on. - :param profile: The new profile which may contain a name for - the container. - :return: A tuple consisting of a boolean indicating whether the name - needs to change and the container name determined.
- """ - old_name = self.properties[self.NAME] or obj.name - new_name = profile.properties[self.NAME] or obj.name - if old_name == new_name: - return False, new_name - return True, new_name - - def _update_name(self, obj, new_name): - try: - self.docker(obj).rename(obj.physical_id, new_name) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='container', id=obj.physical_id, - message=str(ex)) - - def handle_reboot(self, obj, **options): - """Handler for a reboot operation. - - :param obj: The node object representing the container. - :returns: None - """ - if not obj.physical_id: - return - - if 'timeout' in options: - params = {'timeout': options['timeout']} - else: - params = {} - try: - self.docker(obj).restart(obj.physical_id, **params) - except exc.InternalError as ex: - raise exc.EResourceOperation(type='container', - id=obj.physical_id[:8], - op='rebooting', - message=str(ex)) - return - - def handle_pause(self, obj): - """Handler for a pause operation. - - :param obj: The node object representing the container. - :returns: None - """ - if not obj.physical_id: - return - - try: - self.docker(obj).pause(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceOperation(type='container', - id=obj.physical_id[:8], - op='pausing', - message=str(ex)) - return - - def handle_unpause(self, obj): - """Handler for an unpause operation. - - :param obj: The node object representing the container. - :returns: None - """ - if not obj.physical_id: - return - - try: - self.docker(obj).unpause(obj.physical_id) - except exc.InternalError as ex: - raise exc.EResourceOperation(type='container', - id=obj.physical_id[:8], - op='unpausing', - message=str(ex)) - return - - def handle_stop(self, obj, **options): - """Handler for the stop operation.""" - if not obj.physical_id: - return - timeout = options.get('timeout', None) - if timeout: - timeout = int(timeout) - try: - self.docker(obj).stop(obj.physical_id, timeout=timeout) - except exc.InternalError as ex: - raise exc.EResourceOperation(type='container', - id=obj.physical_id[:8], - op='stop', - message=str(ex)) diff --git a/senlin/profiles/os/__init__.py b/senlin/profiles/os/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/profiles/os/heat/__init__.py b/senlin/profiles/os/heat/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/profiles/os/heat/stack.py b/senlin/profiles/os/heat/stack.py deleted file mode 100644 index 698131dce..000000000 --- a/senlin/profiles/os/heat/stack.py +++ /dev/null @@ -1,412 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_log import log as logging - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.common import utils -from senlin.profiles import base - -LOG = logging.getLogger(__name__) - - -class StackProfile(base.Profile): - """Profile for an OpenStack Heat stack.""" - - VERSIONS = { - '1.0': [ - {'status': consts.SUPPORTED, 'since': '2016.04'} - ] - } - - KEYS = ( - CONTEXT, TEMPLATE, TEMPLATE_URL, PARAMETERS, - FILES, TIMEOUT, DISABLE_ROLLBACK, ENVIRONMENT, - ) = ( - 'context', 'template', 'template_url', 'parameters', - 'files', 'timeout', 'disable_rollback', 'environment', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('A dictionary for specifying the customized context for ' - 'stack operations'), - default={}, - ), - TEMPLATE: schema.Map( - _('Heat stack template.'), - default={}, - updatable=True, - ), - TEMPLATE_URL: schema.String( - _('Heat stack template url.'), - default='', - updatable=True, - ), - PARAMETERS: schema.Map( - _('Parameters to be passed to Heat for stack operations.'), - default={}, - updatable=True, - ), - FILES: schema.Map( - _('Contents of files referenced by the template, if any.'), - default={}, - updatable=True, - ), - TIMEOUT: schema.Integer( - _('An integer that specifies the number of minutes after ' - 'which a stack operation times out.'), - updatable=True, - ), - DISABLE_ROLLBACK: schema.Boolean( - _('A boolean specifying whether a stack operation can be ' - 'rolled back.'), - default=True, - updatable=True, - ), - ENVIRONMENT: schema.Map( - _('A map that specifies the environment used for stack ' - 'operations.'), - default={}, - updatable=True, - ) - } - - OP_NAMES = ( - OP_ABANDON, - ) = ( - 'abandon', - ) - - OPERATIONS = { - OP_ABANDON: schema.Map( - _('Abandon a heat stack node.'), - ) - } - - def __init__(self, type_name, name, **kwargs): - super(StackProfile, self).__init__(type_name, name, **kwargs) - self.stack_id = None - - def validate(self, validate_props=False): - """Validate the schema and the data provided.""" - # general validation - self.spec_data.validate() - self.properties.validate() - # validate template - template = self.properties[self.TEMPLATE] - template_url = self.properties[self.TEMPLATE_URL] - if not template and not template_url: - msg = _("Neither template nor template_url is specified " - "for profile '%s'.") % self.name - raise exc.InvalidSpec(message=msg) - - if validate_props: - self.do_validate(obj=self) - - def do_validate(self, obj): - """Validate the stack template used by a node. - - :param obj: Node object to operate. - :returns: True if validation succeeds. - :raises: `InvalidSpec` exception is raised if template is invalid. - """ - kwargs = { - 'stack_name': utils.random_name(), - 'template': self.properties[self.TEMPLATE], - 'template_url': self.properties[self.TEMPLATE_URL], - 'parameters': self.properties[self.PARAMETERS], - 'files': self.properties[self.FILES], - 'environment': self.properties[self.ENVIRONMENT], - 'preview': True, - } - try: - self.orchestration(obj).stack_create(**kwargs) - except exc.InternalError as ex: - msg = _('Failed in validating template: %s') % str(ex) - raise exc.InvalidSpec(message=msg) - - return True - - def do_create(self, obj): - """Create a heat stack using the given node object. - - :param obj: The node object to operate on. - :returns: The UUID of the heat stack created.
- """ - tags = ["cluster_node_id=%s" % obj.id] - if obj.cluster_id: - tags.append('cluster_id=%s' % obj.cluster_id) - tags.append('cluster_node_index=%s' % obj.index) - kwargs = { - 'stack_name': obj.name + '-' + utils.random_name(8), - 'template': self.properties[self.TEMPLATE], - 'template_url': self.properties[self.TEMPLATE_URL], - 'timeout_mins': self.properties[self.TIMEOUT], - 'disable_rollback': self.properties[self.DISABLE_ROLLBACK], - 'parameters': self.properties[self.PARAMETERS], - 'files': self.properties[self.FILES], - 'environment': self.properties[self.ENVIRONMENT], - 'tags': ",".join(tags) - } - - try: - stack = self.orchestration(obj).stack_create(**kwargs) - - # Timeout = None means we will use the 'default_action_timeout' - # It can be overridden by the TIMEOUT profile properties - timeout = None - if self.properties[self.TIMEOUT]: - timeout = self.properties[self.TIMEOUT] * 60 - - self.orchestration(obj).wait_for_stack(stack.id, 'CREATE_COMPLETE', - timeout=timeout) - return stack.id - except exc.InternalError as ex: - raise exc.EResourceCreation(type='stack', - message=str(ex)) - - def do_delete(self, obj, **params): - """Delete the physical stack behind the node object. - - :param obj: The node object to operate on. - :param kwargs params: Optional keyword arguments for the delete - operation. - :returns: This operation always returns True unless exception is - caught. - :raises: `EResourceDeletion` if interaction with heat fails. - """ - stack_id = obj.physical_id - if not stack_id: - return True - - ignore_missing = params.get('ignore_missing', True) - try: - self.orchestration(obj).stack_delete(stack_id, ignore_missing) - self.orchestration(obj).wait_for_stack_delete(stack_id) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='stack', id=stack_id, - message=str(ex)) - return True - - def do_update(self, obj, new_profile, **params): - """Perform update on object. - - :param obj: the node object to operate on - :param new_profile: the new profile used for updating - :param params: other parameters for the update request. - :returns: A boolean indicating whether the operation is successful. 
- """ - self.stack_id = obj.physical_id - if not self.stack_id: - return False - - if not self.validate_for_update(new_profile): - return False - - fields = {} - new_template = new_profile.properties[new_profile.TEMPLATE] - if new_template != self.properties[self.TEMPLATE]: - fields['template'] = new_template - - new_params = new_profile.properties[new_profile.PARAMETERS] - if new_params != self.properties[self.PARAMETERS]: - fields['parameters'] = new_params - - new_timeout = new_profile.properties[new_profile.TIMEOUT] - if new_timeout != self.properties[self.TIMEOUT]: - fields['timeout_mins'] = new_timeout - - new_dr = new_profile.properties[new_profile.DISABLE_ROLLBACK] - if new_dr != self.properties[self.DISABLE_ROLLBACK]: - fields['disable_rollback'] = new_dr - - new_files = new_profile.properties[new_profile.FILES] - if new_files != self.properties[self.FILES]: - fields['files'] = new_files - - new_environment = new_profile.properties[new_profile.ENVIRONMENT] - if new_environment != self.properties[self.ENVIRONMENT]: - fields['environment'] = new_environment - - if not fields: - return True - - try: - hc = self.orchestration(obj) - # Timeout = None means we will use the 'default_action_timeout' - # It can be overridden by the TIMEOUT profile properties - timeout = None - if self.properties[self.TIMEOUT]: - timeout = self.properties[self.TIMEOUT] * 60 - hc.stack_update(self.stack_id, **fields) - hc.wait_for_stack(self.stack_id, 'UPDATE_COMPLETE', - timeout=timeout) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='stack', id=self.stack_id, - message=str(ex)) - - return True - - def do_check(self, obj): - """Check stack status. - - :param obj: Node object to operate. - :returns: True if check succeeded, or False otherwise. - """ - stack_id = obj.physical_id - if stack_id is None: - return False - - hc = self.orchestration(obj) - try: - # Timeout = None means we will use the 'default_action_timeout' - # It can be overridden by the TIMEOUT profile properties - timeout = None - if self.properties[self.TIMEOUT]: - timeout = self.properties[self.TIMEOUT] * 60 - hc.stack_check(stack_id) - hc.wait_for_stack(stack_id, 'CHECK_COMPLETE', timeout=timeout) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='checking', type='stack', - id=stack_id, - message=str(ex)) - - return True - - def do_get_details(self, obj): - if not obj.physical_id: - return {} - - try: - stack = self.orchestration(obj).stack_get(obj.physical_id) - return stack.to_dict() - except exc.InternalError as ex: - return { - 'Error': { - 'code': ex.code, - 'message': str(ex) - } - } - - def do_adopt(self, obj, overrides=None, snapshot=False): - """Adopt an existing stack node for management. - - :param obj: A node object for this operation. It could be a puppet - node that provides only 'user', 'project' and 'physical_id' - properties when doing a preview. It can be a real Node object for - node adoption. - :param overrides: A dict containing the properties that will be - overridden when generating a profile for the stack. - :param snapshot: A boolean flag indicating whether the profile should - attempt a snapshot operation before adopting the stack. If set to - True, the ID of the snapshot will be used as the image ID. - - :returns: A dict containing the spec created from the stack object or - a dict containing error information if failure occurred. - """ - driver = self.orchestration(obj) - - # TODO(Qiming): Add snapshot support - # snapshot = driver.snapshot_create(...) 
- - try: - stack = driver.stack_get(obj.physical_id) - tmpl = driver.stack_get_template(obj.physical_id) - env = driver.stack_get_environment(obj.physical_id) - files = driver.stack_get_files(obj.physical_id) - except exc.InternalError as ex: - return {'Error': {'code': ex.code, 'message': str(ex)}} - - spec = { - self.ENVIRONMENT: env.to_dict(), - self.FILES: files, - self.TEMPLATE: tmpl.to_dict(), - self.PARAMETERS: dict((k, v) for k, v in stack.parameters.items() - if k.find('OS::', 0) < 0), - self.TIMEOUT: stack.timeout_mins, - self.DISABLE_ROLLBACK: stack.is_rollback_disabled - } - if overrides: - spec.update(overrides) - - return spec - - def _refresh_tags(self, current, node, add=False): - """Refresh tag list. - - :param current: Current list of tags. - :param node: The node object. - :param add: Flag indicating whether new tags are added. - :returns: (tags, updated) where tags contains a new list of tags and - updated indicates whether new tag list differs from the old - one. - """ - tags = [] - for tag in current: - if tag.find('cluster_id=') == 0: - continue - elif tag.find('cluster_node_id=') == 0: - continue - elif tag.find('cluster_node_index=') == 0: - continue - if tag.strip() != "": - tags.append(tag.strip()) - - if add: - tags.append('cluster_id=' + node.cluster_id) - tags.append('cluster_node_id=' + node.id) - tags.append('cluster_node_index=%s' % node.index) - - tag_str = ",".join(tags) - return (tag_str, tags != current) - - def do_join(self, obj, cluster_id): - if not obj.physical_id: - return False - - hc = self.orchestration(obj) - try: - stack = hc.stack_get(obj.physical_id) - tags, updated = self._refresh_tags(stack.tags, obj, True) - field = {'tags': tags} - if updated: - hc.stack_update(obj.physical_id, **field) - except exc.InternalError as ex: - LOG.error('Failed in updating stack tags: %s.', ex) - return False - - return True - - def do_leave(self, obj): - if not obj.physical_id: - return False - - hc = self.orchestration(obj) - try: - stack = hc.stack_get(obj.physical_id) - tags, updated = self._refresh_tags(stack.tags, obj, False) - field = {'tags': tags} - if updated: - hc.stack_update(obj.physical_id, **field) - except exc.InternalError as ex: - LOG.error('Failed in updating stack tags: %s.', ex) - return False - - return True - - def handle_abandon(self, obj, **options): - """Handler for abandoning a heat stack node.""" - pass diff --git a/senlin/profiles/os/nova/__init__.py b/senlin/profiles/os/nova/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/profiles/os/nova/server.py b/senlin/profiles/os/nova/server.py deleted file mode 100644 index 2e238f435..000000000 --- a/senlin/profiles/os/nova/server.py +++ /dev/null @@ -1,2182 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
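[Editor's note: the tag bookkeeping in StackProfile._refresh_tags removed above is self-contained enough to restate in a few lines. A sketch outside the removed tree, showing the join case; the function name and argument list here are illustrative.]

# Sketch, not part of the removed tree: strip cluster-related stack tags
# and, on join, re-add them for the node's new cluster.
def refresh_tags(current, cluster_id, node_id, index, add=False):
    tags = [t.strip() for t in current
            if t.strip() and not t.startswith(('cluster_id=',
                                               'cluster_node_id=',
                                               'cluster_node_index='))]
    if add:
        tags += ['cluster_id=' + cluster_id,
                 'cluster_node_id=' + node_id,
                 'cluster_node_index=%s' % index]
    # Return the comma-joined tag string and whether the list changed.
    return ",".join(tags), tags != current

tag_str, changed = refresh_tags(['foo', 'cluster_id=old'], 'c1', 'n1', 3,
                                add=True)
print(tag_str)  # foo,cluster_id=c1,cluster_node_id=n1,cluster_node_index=3
print(changed)  # True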
- -import base64 -import copy - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import encodeutils - -from senlin.common import constraints -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import schema -from senlin.objects import node as node_obj -from senlin.profiles import base - -LOG = logging.getLogger(__name__) - - -class ServerProfile(base.Profile): - """Profile for an OpenStack Nova server.""" - - VERSION = '1.2' - VERSIONS = { - '1.0': [ - {'status': consts.SUPPORTED, 'since': '2016.04'} - ], - '1.1': [ - {'status': consts.SUPPORTED, 'since': '2023.04'} - ], - '1.2': [ - {'status': consts.SUPPORTED, 'since': '2023.07'} - ] - } - - KEYS = ( - CONTEXT, ADMIN_PASS, AUTO_DISK_CONFIG, AVAILABILITY_ZONE, - BLOCK_DEVICE_MAPPING_V2, - CONFIG_DRIVE, FLAVOR, IMAGE, KEY_NAME, METADATA, - NAME, NETWORKS, PERSONALITY, SECURITY_GROUPS, - USER_DATA, SCHEDULER_HINTS, - ) = ( - 'context', 'admin_pass', 'auto_disk_config', 'availability_zone', - 'block_device_mapping_v2', - 'config_drive', 'flavor', 'image', 'key_name', 'metadata', - 'name', 'networks', 'personality', 'security_groups', - 'user_data', 'scheduler_hints', - ) - - BDM2_KEYS = ( - BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, - BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, - BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, - BDM2_DELETE_ON_TERMINATION, BDM2_VOLUME_TYPE, - ) = ( - 'uuid', 'source_type', 'destination_type', 'disk_bus', - 'device_name', 'volume_size', 'guest_format', 'boot_index', - 'device_type', 'delete_on_termination', 'volume_type' - ) - - NETWORK_KEYS = ( - PORT, VNIC_TYPE, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, - FLOATING_NETWORK, FLOATING_IP, SUBNET - ) = ( - 'port', 'vnic_type', 'fixed_ip', 'network', 'security_groups', - 'floating_network', 'floating_ip', 'subnet' - ) - - PERSONALITY_KEYS = ( - PATH, CONTENTS, - ) = ( - 'path', 'contents', - ) - - SCHEDULER_HINTS_KEYS = ( - GROUP, - ) = ( - 'group', - ) - - properties_schema = { - CONTEXT: schema.Map( - _('Customized security context for operating servers.'), - ), - ADMIN_PASS: schema.String( - _('Password for the administrator account.'), - ), - AUTO_DISK_CONFIG: schema.Boolean( - _('Whether the disk partition is done automatically.'), - default=True, - ), - AVAILABILITY_ZONE: schema.String( - _('Name of availability zone for running the server.'), - ), - BLOCK_DEVICE_MAPPING_V2: schema.List( - _('A list specifying the properties of block devices to be used ' - 'for this server.'), - schema=schema.Map( - _('A map specifying the properties of a block device to be ' - 'used by the server.'), - schema={ - BDM2_UUID: schema.String( - _('ID of the source image, snapshot or volume'), - ), - BDM2_SOURCE_TYPE: schema.String( - _("Volume source type, must be one of 'image', " - "'snapshot', 'volume' or 'blank'"), - required=True, - ), - BDM2_DESTINATION_TYPE: schema.String( - _("Volume destination type, must be 'volume' or " - "'local'"), - required=True, - ), - BDM2_DISK_BUS: schema.String( - _('Bus of the device.'), - ), - BDM2_DEVICE_NAME: schema.String( - _('Name of the device(e.g. vda, xda, ....).'), - ), - BDM2_VOLUME_SIZE: schema.Integer( - _('Size of the block device in MB(for swap) and ' - 'in GB(for other formats)'), - required=True, - ), - BDM2_GUEST_FORMAT: schema.String( - _('Specifies the disk file system format(e.g. 
swap, ' - 'ephemeral, ...).'), - ), - BDM2_BOOT_INDEX: schema.Integer( - _('Define the boot order of the device'), - ), - BDM2_DEVICE_TYPE: schema.String( - _('Type of the device(e.g. disk, cdrom, ...).'), - ), - BDM2_DELETE_ON_TERMINATION: schema.Boolean( - _('Whether to delete the volume when the server ' - 'stops.'), - ), - BDM2_VOLUME_TYPE: schema.String( - _('Id or name of volume type.'), - ), - } - ), - ), - CONFIG_DRIVE: schema.Boolean( - _('Whether config drive should be enabled for the server.'), - ), - FLAVOR: schema.String( - _('ID of flavor used for the server.'), - required=True, - updatable=True, - ), - IMAGE: schema.String( - # IMAGE is not required, because the server may boot from a - # block device mapping (BDM or BDMv2) instead - _('ID of image to be used for the new server.'), - updatable=True, - ), - KEY_NAME: schema.String( - _('Name of Nova keypair to be injected to server.'), - ), - METADATA: schema.Map( - _('A collection of key/value pairs to be associated with the ' - 'server created. Both key and value must be <=255 chars.'), - updatable=True, - ), - NAME: schema.String( - _('Name of the server. When omitted, the node name will be used.'), - updatable=True, - ), - NETWORKS: schema.List( - _('List of networks for the server.'), - schema=schema.Map( - _('A map specifying the properties of a network for use.'), - schema={ - NETWORK: schema.String( - _('Name or ID of network to create a port on.'), - ), - PORT: schema.String( - _('Port ID to be used by the network.'), - ), - VNIC_TYPE: schema.String( - _('Define the vnic_type to be used by the port.'), - ), - FIXED_IP: schema.String( - _('Fixed IP to be used by the network.'), - ), - PORT_SECURITY_GROUPS: schema.List( - _('A list of security groups to be attached to ' - 'this port.'), - schema=schema.String( - _('Name of a security group'), - required=True, - ), - ), - FLOATING_NETWORK: schema.String( - _('The network on which to create a floating IP'), - ), - FLOATING_IP: schema.String( - _('The floating IP address to be associated with ' - 'this port.'), - ), - SUBNET: schema.String( - _('Subnet in which to allocate the IP address for ' - 'this port.'), - ), - }, - ), - updatable=True, - ), - PERSONALITY: schema.List( - _('List of files to be injected into the server, where each ' - 'file is specified by a path and its contents.'), - schema=schema.Map( - _('A map specifying the path & contents for an injected ' - 'file.'), - schema={ - PATH: schema.String( - _('In-instance path for the file to be injected.'), - required=True, - ), - CONTENTS: schema.String( - _('Contents of the file to be injected.'), - required=True, - ), - }, - ), - ), - SCHEDULER_HINTS: schema.Map( - _('A collection of key/value pairs to be associated with the ' - 'Scheduler hints. 
Both key and value must be <=255 chars.'), - ), - - SECURITY_GROUPS: schema.List( - _('List of security groups.'), - schema=schema.String( - _('Name of a security group'), - required=True, - ), - ), - USER_DATA: schema.String( - _('User data to be exposed by the metadata server.'), - ), - } - - OP_NAMES = ( - OP_REBOOT, OP_REBUILD, OP_CHANGE_PASSWORD, OP_PAUSE, OP_UNPAUSE, - OP_SUSPEND, OP_RESUME, OP_LOCK, OP_UNLOCK, OP_START, OP_STOP, - OP_RESCUE, OP_UNRESCUE, OP_EVACUATE, OP_MIGRATE, - ) = ( - 'reboot', 'rebuild', 'change_password', 'pause', 'unpause', - 'suspend', 'resume', 'lock', 'unlock', 'start', 'stop', - 'rescue', 'unrescue', 'evacuate', 'migrate', - ) - - ADMIN_PASSWORD = 'admin_pass' - RESCUE_IMAGE = 'image_ref' - EVACUATE_OPTIONS = ( - EVACUATE_HOST, EVACUATE_FORCE - ) = ( - 'host', 'force' - ) - - OPERATIONS = { - OP_REBOOT: schema.Operation( - _("Reboot the nova server."), - schema={ - consts.REBOOT_TYPE: schema.StringParam( - _("Type of reboot which can be 'SOFT' or 'HARD'."), - default=consts.REBOOT_SOFT, - constraints=[ - constraints.AllowedValues(consts.REBOOT_TYPES), - ] - ) - } - ), - OP_REBUILD: schema.Operation( - _("Rebuild the server using current image and admin password."), - ), - OP_CHANGE_PASSWORD: schema.Operation( - _("Change the administrator password."), - schema={ - ADMIN_PASSWORD: schema.StringParam( - _("New password for the administrator.") - ) - } - ), - OP_PAUSE: schema.Operation( - _("Pause the server from running."), - ), - OP_UNPAUSE: schema.Operation( - _("Unpause the server to running state."), - ), - OP_SUSPEND: schema.Operation( - _("Suspend the running of the server."), - ), - OP_RESUME: schema.Operation( - _("Resume the running of the server."), - ), - OP_LOCK: schema.Operation( - _("Lock the server."), - ), - OP_UNLOCK: schema.Operation( - _("Unlock the server."), - ), - OP_START: schema.Operation( - _("Start the server."), - ), - OP_STOP: schema.Operation( - _("Stop the server."), - ), - OP_RESCUE: schema.Operation( - _("Rescue the server."), - schema={ - RESCUE_IMAGE: schema.StringParam( - _("A string referencing the image to use."), - ), - } - ), - OP_UNRESCUE: schema.Operation( - _("Unrescue the server."), - ), - OP_EVACUATE: schema.Operation( - _("Evacuate the server to a different host."), - schema={ - EVACUATE_HOST: schema.StringParam( - _("The target host to evacuate the server."), - ), - EVACUATE_FORCE: schema.StringParam( - _("Whether the evacuation should be a forced one.") - ) - } - ) - } - - def __init__(self, type_name, name, **kwargs): - super(ServerProfile, self).__init__(type_name, name, **kwargs) - self.server_id = None - self.stop_timeout = cfg.CONF.default_nova_timeout - - def _validate_az(self, obj, az_name, reason=None): - try: - res = self.compute(obj).validate_azs([az_name]) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - else: - raise - - if not res: - msg = _("The specified %(key)s '%(value)s' could not be found" - ) % {'key': self.AVAILABILITY_ZONE, 'value': az_name} - if reason == 'create': - raise exc.EResourceCreation(type='server', message=msg) - else: - raise exc.InvalidSpec(message=msg) - - return az_name - - def _validate_flavor(self, obj, name_or_id, reason=None): - flavor = None - msg = '' - try: - flavor = self.compute(obj).flavor_find(name_or_id, False) - except exc.InternalError as ex: - msg = str(ex) - if reason is None: # reason is 'validate' - if ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." 
- ) % {'k': self.FLAVOR, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - if flavor is not None: - if not flavor.is_disabled: - return flavor - msg = _("The specified %(k)s '%(v)s' is disabled" - ) % {'k': self.FLAVOR, 'v': name_or_id} - - if reason == 'create': - raise exc.EResourceCreation(type='server', message=msg) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=msg) - else: - raise exc.InvalidSpec(message=msg) - - def _validate_image(self, obj, name_or_id, reason=None): - try: - return self.glance(obj).image_find(name_or_id, False) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - elif ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." - ) % {'k': self.IMAGE, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - def _validate_keypair(self, obj, name_or_id, reason=None): - try: - return self.compute(obj).keypair_find(name_or_id, False) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - elif ex.code == 404: - msg = _("The specified %(k)s '%(v)s' could not be found." - ) % {'k': self.KEY_NAME, 'v': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - def _validate_volume(self, obj, name_or_id, reason=None): - try: - volume = self.block_storage(obj).volume_get(name_or_id) - if volume.status == 'available': - return volume - - msg = _("The volume %(k)s should be in 'available' status " - "but is in '%(v)s' status." - ) % {'k': name_or_id, 'v': volume.status} - raise exc.InvalidSpec(message=msg) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - elif ex.code == 404: - msg = _("The specified volume '%(k)s' could not be found." - ) % {'k': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - def _validate_volume_type(self, obj, name_or_id, reason=None): - try: - return self.block_storage(obj).volume_type_get(name_or_id, False) - except exc.InternalError as ex: - if reason == 'create': - raise exc.EResourceCreation(type='server', - message=str(ex)) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - elif ex.code == 404: - msg = _("The specified volume type '%(k)s' could not be found." - ) % {'k': name_or_id} - raise exc.InvalidSpec(message=msg) - else: - raise - - def do_validate(self, obj): - """Validate if the spec has provided valid info for server creation. - - :param obj: The node object. 
- """ - # validate availability_zone - az_name = self.properties[self.AVAILABILITY_ZONE] - if az_name is not None: - self._validate_az(obj, az_name) - - # validate flavor - flavor = self.properties[self.FLAVOR] - self._validate_flavor(obj, flavor) - - # validate image - image = self.properties[self.IMAGE] - if image is not None: - self._validate_image(obj, image) - - # validate key_name - keypair = self.properties[self.KEY_NAME] - if keypair is not None: - self._validate_keypair(obj, keypair) - - # validate networks - networks = self.properties[self.NETWORKS] - for net in networks: - self._validate_network(obj, net) - - return True - - def _resolve_bdm(self, obj, bdm, reason=None): - for bd in bdm: - for key in self.BDM2_KEYS: - if bd[key] is None: - del bd[key] - if 'volume_type' in bd: - volume_type = self._validate_volume_type(obj, - bd['volume_type']) - bd['volume_type'] = volume_type.name - if 'uuid' in bd and 'source_type' in bd: - if bd['source_type'] == 'image': - self._validate_image(obj, bd['uuid'], reason) - elif bd['source_type'] == 'volume': - self._validate_volume(obj, bd['uuid'], reason) - - return bdm - - def _check_security_groups(self, nc, net_spec, result): - """Check security groups. - - :param nc: network driver connection. - :param net_spec: the specification to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - sgs = net_spec.get(self.PORT_SECURITY_GROUPS) - if not sgs: - return - - res = [] - try: - for sg in sgs: - try: - # try to find sg scoped by project first - sg_obj = nc.security_group_find( - sg, project_id=self.project) - except exc.InternalError: - # if it fails to find sg, try without project scope - sg_obj = nc.security_group_find(sg) - res.append(sg_obj.id) - except exc.InternalError as ex: - return str(ex) - - result[self.PORT_SECURITY_GROUPS] = res - return - - def _check_network(self, nc, net, result): - """Check the specified network. - - :param nc: network driver connection. - :param net: the name or ID of network to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - if net is None: - return - try: - net_obj = nc.network_get(net) - if net_obj is None: - return _("The specified network %s could not be found.") % net - result[self.NETWORK] = net_obj.id - except exc.InternalError as ex: - return str(ex) - - def _check_subnet(self, nc, subnet, result): - """Check the specified subnet. - - :param nc: network driver connection. - :param subnet: the name or ID of subnet to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - if subnet is None: - return - try: - net_obj = nc.subnet_get(subnet) - if net_obj is None: - return _("The specified subnet %s could not be found." - ) % subnet - result[self.SUBNET] = net_obj.id - except exc.InternalError as ex: - return str(ex) - - def _check_port(self, nc, port, result): - """Check the specified port. - - :param nc: network driver connection. - :param port: the name or ID of port to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. 
- """ - if port is None: - return - - try: - port_obj = nc.port_find(port) - if port_obj.status != 'DOWN': - return _("The status of the port %(p)s must be DOWN" - ) % {'p': port} - result[self.PORT] = port_obj.id - return - except exc.InternalError as ex: - return str(ex) - - def _check_floating_ip(self, nc, net_spec, result): - """Check floating IP and network, if specified. - - :param nc: network driver connection. - :param net_spec: the specification to check. - :param result: the result that is used as return value. - :returns: None if succeeded or an error message if things go wrong. - """ - net = net_spec.get(self.FLOATING_NETWORK) - if net: - try: - net_obj = nc.network_get(net) - if net_obj is None: - return _("The floating network %s could not be found." - ) % net - result[self.FLOATING_NETWORK] = net_obj.id - except exc.InternalError as ex: - return str(ex) - - flt_ip = net_spec.get(self.FLOATING_IP) - if not flt_ip: - return - - try: - # Find floating ip with this address - fip = nc.floatingip_find(flt_ip) - if fip: - if fip.status == 'ACTIVE': - return _('the floating IP %s has been used.') % flt_ip - result['floating_ip_id'] = fip.id - - # Create a floating IP with address if floating ip unspecified - if not net: - return _('Must specify a network to create floating IP') - - result[self.FLOATING_IP] = flt_ip - return - except exc.InternalError as ex: - return str(ex) - - def _validate_network(self, obj, net_spec, reason=None): - - def _verify(error): - if error is None: - return - - if reason == 'create': - raise exc.EResourceCreation(type='server', message=error) - elif reason == 'update': - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=error) - else: - raise exc.InvalidSpec(message=error) - - nc = self.network(obj) - result = {} - - # check network - net = net_spec.get(self.NETWORK) - error = self._check_network(nc, net, result) - _verify(error) - - # check subnet - subnet_net = net_spec.get(self.SUBNET) - error = self._check_subnet(nc, subnet_net, result) - _verify(error) - - # check port - port = net_spec.get(self.PORT) - error = self._check_port(nc, port, result) - _verify(error) - - if port is None and net is None: - _verify(_("One of '%(p)s' and '%(n)s' must be provided" - ) % {'p': self.PORT, 'n': self.NETWORK}) - - fixed_ip = net_spec.get(self.FIXED_IP) - if fixed_ip: - if port is not None: - _verify(_("The '%(p)s' property and the '%(fip)s' property " - "cannot be specified at the same time" - ) % {'p': self.PORT, 'fip': self.FIXED_IP}) - result[self.FIXED_IP] = fixed_ip - - # Validate vnic_type input - vnic_type = net_spec.get(self.VNIC_TYPE, None) - if vnic_type is not None: - if vnic_type not in ['normal', 'direct', 'macvtap']: - _verify(_("vnic_type: '%(v)s' is not supported." - "(supported types are: normal, direct, macvtap)" - ) % {'v': vnic_type}) - result[self.VNIC_TYPE] = vnic_type - - # Check security_groups - error = self._check_security_groups(nc, net_spec, result) - _verify(error) - - # Check floating IP - error = self._check_floating_ip(nc, net_spec, result) - _verify(error) - - return result - - def _get_port(self, obj, net_spec): - """Fetch or create a port. - - :param obj: The node object. - :param net_spec: The parameters to create a port. - :returns: Created port object and error message. 
- """ - port_id = net_spec.get(self.PORT, None) - if port_id: - try: - port = self.network(obj).port_find(port_id) - return port, None - except exc.InternalError as ex: - return None, ex - port_attr = { - 'network_id': net_spec.get(self.NETWORK), - } - fixed_ips = {} - fixed_ip = net_spec.get(self.FIXED_IP, None) - if fixed_ip: - fixed_ips['fixed_ip'] = fixed_ip - subnet = net_spec.get(self.SUBNET, None) - if subnet: - fixed_ips['subnet_id'] = subnet - if fixed_ips: - port_attr['fixed_ips'] = [fixed_ips] - security_groups = net_spec.get(self.PORT_SECURITY_GROUPS, []) - if security_groups: - port_attr['security_groups'] = security_groups - vnic_type = net_spec.get(self.VNIC_TYPE, None) - if vnic_type: - port_attr['binding_vnic_type'] = vnic_type - try: - port = self.network(obj).port_create(**port_attr) - LOG.debug('Network port_attr : %s', port) - return port, None - except exc.InternalError as ex: - return None, ex - - def _delete_ports(self, obj, ports): - """Delete ports. - - :param obj: The node object - :param ports: A list of internal ports. - :returns: None for succeed or error for failure. - """ - pp = copy.deepcopy(ports) - for port in pp: - # remove port created by senlin - if port.get('remove', False): - try: - # remove floating IP created by senlin - if port.get('floating', None) and port[ - 'floating'].get('remove', False): - self.network(obj).floatingip_delete( - port['floating']['id']) - self.network(obj).port_delete(port['id']) - except exc.InternalError as ex: - return ex - ports.remove(port) - node_data = obj.data - node_data['internal_ports'] = ports - node_obj.Node.update(self.context, obj.id, {'data': node_data}) - - def _get_floating_ip(self, obj, fip_spec, port_id): - """Find or Create a floating IP. - - :param obj: The node object. - :param fip_spec: The parameters to create a floating ip - :param port_id: The port ID to associate with - :returns: A floating IP object and error message. - """ - floating_ip_id = fip_spec.get('floating_ip_id', None) - if floating_ip_id: - try: - fip = self.network(obj).floatingip_find(floating_ip_id) - if fip.port_id is None: - attr = {'port_id': port_id} - fip = self.network(obj).floatingip_update(fip, **attr) - return fip, None - except exc.InternalError as ex: - return None, ex - net_id = fip_spec.get(self.FLOATING_NETWORK) - fip_addr = fip_spec.get(self.FLOATING_IP) - attr = { - 'port_id': port_id, - 'floating_network_id': net_id, - } - if fip_addr: - attr.update({'floating_ip_address': fip_addr}) - try: - fip = self.network(obj).floatingip_create(**attr) - return fip, None - except exc.InternalError as ex: - return None, ex - - def _create_ports_from_properties(self, obj, networks, action_type): - """Create or find ports based on networks property. - - :param obj: The node object. - :param networks: The networks property used for node. - :param action_type: Either 'create' or 'update'. - - :returns: A list of created port's attributes. 
- """ - internal_ports = obj.data.get('internal_ports', []) - if not networks: - return [] - - for net_spec in networks: - net = self._validate_network(obj, net_spec, action_type) - # Create port - port, ex = self._get_port(obj, net) - # Delete created ports before raise error - if ex: - d_ex = self._delete_ports(obj, internal_ports) - if d_ex: - raise d_ex - else: - raise ex - port_attrs = { - 'id': port.id, - 'network_id': port.network_id, - 'security_group_ids': port.security_group_ids, - 'fixed_ips': port.fixed_ips - } - if self.PORT not in net: - port_attrs.update({'remove': True}) - # Create floating ip - if 'floating_ip_id' in net or self.FLOATING_NETWORK in net: - fip, ex = self._get_floating_ip(obj, net, port_attrs['id']) - if ex: - d_ex = self._delete_ports(obj, internal_ports) - if d_ex: - raise d_ex - else: - raise ex - port_attrs['floating'] = { - 'id': fip.id, - 'floating_ip_address': fip.floating_ip_address, - 'floating_network_id': fip.floating_network_id, - } - if self.FLOATING_NETWORK in net: - port_attrs['floating'].update({'remove': True}) - internal_ports.append(port_attrs) - if internal_ports: - try: - node_data = obj.data - node_data.update(internal_ports=internal_ports) - node_obj.Node.update(self.context, obj.id, {'data': node_data}) - except exc.ResourceNotFound: - self._rollback_ports(obj, internal_ports) - raise - return internal_ports - - def _build_metadata(self, obj, usermeta): - """Build custom metadata for server. - - :param obj: The node object to operate on. - :return: A dictionary containing the new metadata. - """ - metadata = usermeta or {} - metadata['cluster_node_id'] = obj.id - if obj.cluster_id: - metadata['cluster_id'] = obj.cluster_id - metadata['cluster_node_index'] = str(obj.index) - - return metadata - - def _update_zone_info(self, obj, server): - """Update the actual zone placement data. - - :param obj: The node object associated with this server. - :param server: The server object returned from creation. - """ - if server.availability_zone: - placement = obj.data.get('placement', None) - if not placement: - obj.data['placement'] = {'zone': server.availability_zone} - else: - obj.data['placement'].setdefault('zone', - server.availability_zone) - # It is safe to use admin context here - ctx = context.get_admin_context() - node_obj.Node.update(ctx, obj.id, {'data': obj.data}) - - def do_create(self, obj): - """Create a server for the node object. - - :param obj: The node object for which a server will be created. 
- """ - kwargs = self._generate_kwargs() - - admin_pass = self.properties[self.ADMIN_PASS] - if admin_pass: - kwargs.pop(self.ADMIN_PASS) - kwargs['adminPass'] = admin_pass - - auto_disk_config = self.properties[self.AUTO_DISK_CONFIG] - kwargs.pop(self.AUTO_DISK_CONFIG) - kwargs['OS-DCF:diskConfig'] = 'AUTO' if auto_disk_config else 'MANUAL' - - image_ident = self.properties[self.IMAGE] - if image_ident is not None: - image = self._validate_image(obj, image_ident, 'create') - kwargs.pop(self.IMAGE) - kwargs['imageRef'] = image.id - - flavor_ident = self.properties[self.FLAVOR] - flavor = self._validate_flavor(obj, flavor_ident, 'create') - kwargs.pop(self.FLAVOR) - kwargs['flavorRef'] = flavor.id - - keypair_name = self.properties[self.KEY_NAME] - if keypair_name: - keypair = self._validate_keypair(obj, keypair_name, 'create') - kwargs['key_name'] = keypair.name - - kwargs['name'] = self.properties[self.NAME] or obj.name - - metadata = self._build_metadata(obj, self.properties[self.METADATA]) - kwargs['metadata'] = metadata - - block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] - if block_device_mapping_v2 is not None: - kwargs['block_device_mapping_v2'] = self._resolve_bdm( - obj, block_device_mapping_v2, 'create') - - user_data = self.properties[self.USER_DATA] - if user_data is not None: - ud = encodeutils.safe_encode(user_data) - kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud)) - - networks = self.properties[self.NETWORKS] - ports = None - if networks is not None: - ports = self._create_ports_from_properties( - obj, networks, 'create') - kwargs['networks'] = [ - {'port': port['id']} for port in ports] - - secgroups = self.properties[self.SECURITY_GROUPS] - if secgroups: - kwargs['security_groups'] = [{'name': sg} for sg in secgroups] - - if 'placement' in obj.data: - if 'zone' in obj.data['placement']: - kwargs['availability_zone'] = obj.data['placement']['zone'] - - if 'servergroup' in obj.data['placement']: - group_id = obj.data['placement']['servergroup'] - hints = self.properties.get(self.SCHEDULER_HINTS) or {} - hints.update({'group': group_id}) - kwargs['scheduler_hints'] = hints - - server = None - resource_id = None - try: - server = self.compute(obj).server_create(**kwargs) - self.compute(obj).wait_for_server( - server.id, timeout=cfg.CONF.default_nova_timeout) - server = self.compute(obj).server_get(server.id) - # Update zone placement info if available - self._update_zone_info(obj, server) - return server.id - except exc.ResourceNotFound: - self._rollback_ports(obj, ports) - self._rollback_instance(obj, server) - raise - except exc.InternalError as ex: - if server and server.id: - resource_id = server.id - LOG.debug('Deleting server %s that is ERROR state after' - ' create.', server.id) - - try: - obj.physical_id = server.id - self.do_delete(obj, internal_ports=ports) - except Exception: - LOG.error('Failed to delete server %s', server.id) - pass - elif ports: - self._delete_ports(obj, ports) - raise exc.EResourceCreation(type='server', - message=str(ex), - resource_id=resource_id) - - def _generate_kwargs(self): - """Generate the base kwargs for a server. - - :return: - """ - kwargs = {} - for key in self.KEYS: - # context is treated as connection parameters - if key == self.CONTEXT: - continue - - if self.properties[key] is not None: - kwargs[key] = self.properties[key] - return kwargs - - def _rollback_ports(self, obj, ports): - """Rollback any ports created after a ResourceNotFound exception. - - :param obj: The node object. 
-        :param ports: A list of ports attached to this server.
-        :return: ``None``.
-        """
-        if not ports:
-            return
-
-        LOG.warning(
-            'Rolling back ports for Node %s.',
-            obj.id
-        )
-
-        for port in ports:
-            if not port.get('remove', False):
-                continue
-            try:
-                if (port.get('floating') and
-                        port['floating'].get('remove', False)):
-                    self.network(obj).floatingip_delete(
-                        port['floating']['id']
-                    )
-                self.network(obj).port_delete(port['id'])
-            except exc.InternalError as ex:
-                LOG.debug(
-                    'Failed to delete port %s during rollback for Node %s: %s',
-                    port['id'], obj.id, ex
-                )
-
-    def _rollback_instance(self, obj, server):
-        """Roll back an instance created after a ResourceNotFound exception.
-
-        :param obj: The node object.
-        :param server: The server object to delete.
-        :return: ``None``.
-        """
-        if not server or not server.id:
-            return
-
-        LOG.warning(
-            'Rolling back instance %s for Node %s.',
-            server.id, obj.id
-        )
-
-        try:
-            self.compute(obj).server_force_delete(server.id, True)
-        except exc.InternalError as ex:
-            LOG.debug(
-                'Failed to delete instance %s during rollback for Node %s: %s',
-                server.id, obj.id, ex
-            )
-
-    def do_delete(self, obj, **params):
-        """Delete the physical resource associated with the specified node.
-
-        :param obj: The node object to operate on.
-        :param dict params: Optional keyword arguments for the delete
-            operation.
-        :returns: This operation always returns True unless an exception
-            is raised.
-        :raises: `EResourceDeletion` if interaction with the compute service
-            fails.
-        """
-        server_id = obj.physical_id
-        ignore_missing = params.get('ignore_missing', True)
-        internal_ports = obj.data.get('internal_ports', [])
-        force = params.get('force', False)
-        timeout = params.get('timeout', cfg.CONF.default_nova_timeout)
-
-        try:
-            if server_id:
-                driver = self.compute(obj)
-                if force:
-                    driver.server_force_delete(server_id, ignore_missing)
-                else:
-                    driver.server_delete(server_id, ignore_missing)
-
-                driver.wait_for_server_delete(server_id, timeout=timeout)
-        except exc.InternalError as ex:
-            raise exc.EResourceDeletion(type='server', id=server_id,
-                                        message=str(ex))
-        finally:
-            if internal_ports:
-                ex = self._delete_ports(obj, internal_ports)
-                if ex:
-                    raise exc.EResourceDeletion(type='server', id=server_id,
-                                                message=str(ex))
-        return True
-
-    def _check_server_name(self, obj, profile):
-        """Check if there is a new name to be assigned to the server.
-
-        :param obj: The node object to operate on.
-        :param profile: The new profile which may contain a name for
-            the server instance.
-        :return: A tuple containing a boolean indicating whether the name
-            needs to change and the server name determined.
-        """
-        old_name = self.properties[self.NAME] or obj.name
-        new_name = profile.properties[self.NAME] or obj.name
-        if old_name == new_name:
-            return False, new_name
-        return True, new_name
-
-    def _update_name(self, obj, new_name):
-        """Update the name of the server.
-
-        :param obj: The node object to operate on.
-        :param new_name: The new name for the server instance.
-        :return: ``None``.
-        :raises: ``EResourceUpdate``.
-        """
-        try:
-            self.compute(obj).server_update(obj.physical_id, name=new_name)
-        except exc.InternalError as ex:
-            raise exc.EResourceUpdate(type='server', id=obj.physical_id,
-                                      message=str(ex))
-
-    def _check_password(self, obj, new_profile):
-        """Check if the admin password has been changed in the new profile.
-
-        :param obj: The server node to operate on; not used currently.
-        :param new_profile: The new profile which may contain a new password
-            for the server instance.
- :return: A tuple consisting a boolean indicating whether the password - needs a change and the password determined which could be - '' if new password is not set. - """ - old_passwd = self.properties.get(self.ADMIN_PASS) or '' - new_passwd = new_profile.properties[self.ADMIN_PASS] or '' - if old_passwd == new_passwd: - return False, new_passwd - return True, new_passwd - - def _update_password(self, obj, new_password): - """Update the admin password for the server. - - :param obj: The node object to operate. - :param new_password: The new password for the server instance. - :return: ``None``. - :raises: ``EResourceUpdate``. - """ - try: - self.compute(obj).server_change_password(obj.physical_id, - new_password) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_metadata(self, obj, new_profile): - """Update the server metadata. - - :param obj: The node object to operate on. - :param new_profile: The new profile that may contain some changes to - the metadata. - :returns: ``None`` - :raises: `EResourceUpdate`. - """ - old_meta = self._build_metadata(obj, self.properties[self.METADATA]) - new_meta = self._build_metadata(obj, - new_profile.properties[self.METADATA]) - if new_meta == old_meta: - return - - try: - self.compute(obj).server_metadata_update(obj.physical_id, new_meta) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_flavor(self, obj, new_profile): - """Update server flavor. - - :param obj: The node object to operate on. - :returns: Returns true if the flavor was updated or false otherwise. - :raises: `EResourceUpdate` when operation was a failure. - """ - old_flavor = self.properties[self.FLAVOR] - new_flavor = new_profile.properties[self.FLAVOR] - cc = self.compute(obj) - oldflavor = self._validate_flavor(obj, old_flavor, 'update') - newflavor = self._validate_flavor(obj, new_flavor, 'update') - if oldflavor.id == newflavor.id: - return False - - try: - # server has to be active or stopped in order to resize - # stop server if it is active - server = cc.server_get(obj.physical_id) - if server.status == consts.VS_ACTIVE: - cc.server_stop(obj.physical_id) - cc.wait_for_server(obj.physical_id, consts.VS_SHUTOFF, - timeout=self.stop_timeout) - elif server.status != consts.VS_SHUTOFF: - raise exc.InternalError( - message='Server needs to be ACTIVE or STOPPED in order to' - ' update flavor.') - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - try: - cc.server_resize(obj.physical_id, newflavor.id) - cc.wait_for_server(obj.physical_id, 'VERIFY_RESIZE') - except exc.InternalError as ex: - msg = str(ex) - try: - server = cc.server_get(obj.physical_id) - if server.status == 'RESIZE': - cc.server_resize_revert(obj.physical_id) - cc.wait_for_server(obj.physical_id, consts.VS_SHUTOFF) - - # start server back up in case of exception during resize - cc.server_start(obj.physical_id) - cc.wait_for_server(obj.physical_id, consts.VS_ACTIVE) - except exc.InternalError as ex1: - msg = str(ex1) - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=msg) - - try: - cc.server_resize_confirm(obj.physical_id) - cc.wait_for_server(obj.physical_id, consts.VS_SHUTOFF) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - return True - - def _update_image(self, obj, new_profile, new_name, 
new_password):
-        """Update the image used by the server node.
-
-        :param obj: The node object to operate on.
-        :param new_profile: The profile which may contain a new image name or
-            ID to use.
-        :param new_name: The name for the server node.
-        :param new_password: The new password for the administrative account
-            if provided.
-        :returns: A boolean indicating whether the image was updated.
-        :raises: ``EResourceUpdate`` if the operation fails.
-        """
-        new_image = new_profile.properties[self.IMAGE]
-        if not new_image:
-            msg = _("Updating Nova server with image set to None is not "
                    "supported by Nova")
-            raise exc.EResourceUpdate(type='server', id=obj.physical_id,
-                                      message=msg)
-        # check the new image first
-        img_new = self._validate_image(obj, new_image, reason='update')
-        new_image_id = img_new.id
-
-        driver = self.compute(obj)
-
-        try:
-            server = driver.server_get(obj.physical_id)
-        except exc.InternalError as ex:
-            raise exc.EResourceUpdate(type='server', id=obj.physical_id,
-                                      message=str(ex))
-        old_image_id = self._get_image_id(obj, server, 'updating')
-
-        if new_image_id == old_image_id:
-            return False
-
-        try:
-            # server has to be active or stopped in order to rebuild
-            # stop server if it is active
-            if server.status == consts.VS_ACTIVE:
-                driver.server_stop(obj.physical_id)
-                driver.wait_for_server(obj.physical_id, consts.VS_SHUTOFF,
-                                       timeout=self.stop_timeout)
-            elif server.status != consts.VS_SHUTOFF:
-                raise exc.InternalError(
-                    message='Server needs to be ACTIVE or STOPPED in order to'
-                            ' update image.')
-
-            driver.server_rebuild(obj.physical_id, new_image_id,
-                                  new_name, new_password)
-            driver.wait_for_server(obj.physical_id, consts.VS_SHUTOFF)
-        except exc.InternalError as ex:
-            raise exc.EResourceUpdate(type='server', id=obj.physical_id,
-                                      message=str(ex))
-        return True
-
-    def _update_network_add_port(self, obj, networks):
-        """Create new interfaces for the server node.
-
-        :param obj: The node object to operate on.
-        :param networks: A list containing information about new network
-            interfaces to be created.
-        :returns: ``None``.
-        :raises: ``EResourceUpdate`` if interaction with the drivers failed.
-        """
-        cc = self.compute(obj)
-        try:
-            server = cc.server_get(obj.physical_id)
-        except exc.InternalError as ex:
-            raise exc.EResourceUpdate(type='server', id=obj.physical_id,
-                                      message=str(ex))
-
-        ports = self._create_ports_from_properties(
-            obj, networks, 'update')
-        for port in ports:
-            params = {'port_id': port['id']}
-            try:
-                cc.server_interface_create(server, **params)
-            except exc.InternalError as ex:
-                raise exc.EResourceUpdate(type='server',
-                                          id=obj.physical_id,
-                                          message=str(ex))
-
-    def _find_port_by_net_spec(self, obj, net_spec, ports):
-        """Find existing ports that match the given network properties.
-
-        :param obj: The node object.
-        :param net_spec: Network property of this profile.
-        :param ports: A list of ports attached to this server.
-        :returns: A list of candidate ports matching this network spec.
- """ - # TODO(anyone): handle security_groups - net = self._validate_network(obj, net_spec, 'update') - selected_ports = [] - for p in ports: - floating = p.get('floating', {}) - floating_network = net.get(self.FLOATING_NETWORK, None) - if floating_network and floating.get( - 'floating_network_id') != floating_network: - continue - floating_ip_address = net.get(self.FLOATING_IP, None) - if floating_ip_address and floating.get( - 'floating_ip_address') != floating_ip_address: - continue - # If network properties didn't contain floating ip, - # then we should better not make a port with floating ip - # as candidate. - if floating and not floating_network and not floating_ip_address: - continue - port_id = net.get(self.PORT, None) - if port_id and p['id'] != port_id: - continue - fixed_ip = net.get(self.FIXED_IP, None) - if fixed_ip: - fixed_ips = [ff['ip_address'] for ff in p['fixed_ips']] - if fixed_ip not in fixed_ips: - continue - network = net.get(self.NETWORK, None) - if network: - net_id = self.network(obj).network_get(network).id - if p['network_id'] != net_id: - continue - selected_ports.append(p) - return selected_ports - - def _update_network_remove_port(self, obj, networks): - """Delete existing interfaces from the node. - - :param obj: The node object to operate. - :param networks: A list containing information about network - interfaces to be created. - :returns: ``None`` - :raises: ``EResourceUpdate`` - """ - cc = self.compute(obj) - nc = self.network(obj) - internal_ports = obj.data.get('internal_ports', []) - - if networks: - try: - # stop server if it is active - server = cc.server_get(obj.physical_id) - if server.status == consts.VS_ACTIVE: - cc.server_stop(obj.physical_id) - cc.wait_for_server(obj.physical_id, consts.VS_SHUTOFF, - timeout=self.stop_timeout) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - for n in networks: - candidate_ports = self._find_port_by_net_spec( - obj, n, internal_ports) - port = candidate_ports[0] - try: - # Detach port from server - cc.server_interface_delete(port['id'], obj.physical_id) - # delete port if created by senlin - if port.get('remove', False): - nc.port_delete(port['id'], ignore_missing=True) - # delete floating IP if created by senlin - if (port.get('floating', None) and - port['floating'].get('remove', False)): - nc.floatingip_delete(port['floating']['id'], - ignore_missing=True) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - internal_ports.remove(port) - obj.data['internal_ports'] = internal_ports - node_obj.Node.update(self.context, obj.id, {'data': obj.data}) - - def _nw_compare(self, n1, n2, attribute): - return n1.get(attribute, None) == n2.get(attribute, None) - - def _update_network_update_port(self, obj, networks): - """Update existing port in network from the node. - - Currently only update to security group is supported. - - :param obj: The node object to operate. - :param networks: A list networks that contain updated security groups. 
- :returns: ``None`` - :raises: ``EResourceUpdate`` - """ - nc = self.network(obj) - internal_ports = obj.data.get('internal_ports', []) - - # process each network to be updated - for n in networks: - # verify network properties and resolve names into ids - net = self._validate_network(obj, n, 'update') - - # find existing port that matches network - candidate_ports = self._find_port_by_net_spec( - obj, net, internal_ports) - port = candidate_ports[0] - try: - # set updated security groups for port - port_attr = { - 'security_groups': net.get(self.PORT_SECURITY_GROUPS, []), - } - LOG.debug("Setting security groups %s for port %s", - port_attr, port['id']) - nc.port_update(port['id'], **port_attr) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - def _update_network(self, obj, new_profile): - """Updating server network interfaces. - - :param obj: The node object to operate. - :param new_profile: The new profile which may contain new network - settings. - :return: Returns a tuple of booleans if network was created or deleted. - :raises: ``EResourceUpdate`` if there are driver failures. - """ - networks_current = self.properties[self.NETWORKS] - networks_create = new_profile.properties[self.NETWORKS] - networks_delete = copy.deepcopy(networks_current) - networks_update = [] - - for nw in networks_current: - if nw in networks_create: - # network already exist. no need to create or delete it. - LOG.debug("Network %s already exists, skip create/delete", nw) - networks_create.remove(nw) - networks_delete.remove(nw) - - # find networks for which only security group changed - for nw in networks_current: - # networks to be created with only sg changes - sg_create_nw = [n for n in networks_create - if (self._nw_compare(n, nw, 'network') and - self._nw_compare(n, nw, 'port') and - self._nw_compare(n, nw, 'fixed_ip') and - self._nw_compare(n, nw, 'floating_network') and - self._nw_compare(n, nw, 'floating_ip') and - self._nw_compare(n, nw, 'subnet'))] - for n in sg_create_nw: - # don't create networks with only security group changes - LOG.debug("Network %s only has security group changes, " - "don't create/delete it. Only update it.", n) - networks_create.remove(n) - networks_update.append(n) - if nw in networks_delete: - networks_delete.remove(nw) - - # update network - if networks_update: - self._update_network_update_port(obj, networks_update) - - # Detach some existing interfaces - if networks_delete: - self._update_network_remove_port(obj, networks_delete) - - # Attach new interfaces - if networks_create: - self._update_network_add_port(obj, networks_create) - - return networks_create, networks_delete - - def do_update(self, obj, new_profile=None, **params): - """Perform update on the server. - - :param obj: the server to operate on - :param new_profile: the new profile for the server. - :param params: a dictionary of optional parameters. - :returns: True if update was successful or False otherwise. - :raises: `EResourceUpdate` if operation fails. 
- """ - if not obj.physical_id: - return False - - if not new_profile: - return False - - if not self.validate_for_update(new_profile): - return False - - self.stop_timeout = params.get('cluster.stop_timeout_before_update', - cfg.CONF.default_nova_timeout) - - if not isinstance(self.stop_timeout, int): - raise exc.EResourceUpdate( - type='server', id=obj.physical_id, - message='cluster.stop_timeout_before_update value must be of ' - 'type int.') - - name_changed, new_name = self._check_server_name(obj, new_profile) - passwd_changed, new_passwd = self._check_password(obj, new_profile) - # Update server image: may have side effect of changing server name - # and/or admin password - image_changed = self._update_image(obj, new_profile, new_name, - new_passwd) - if not image_changed: - # we do this separately only when rebuild wasn't performed - if name_changed: - self._update_name(obj, new_name) - if passwd_changed: - self._update_password(obj, new_passwd) - - # Update server flavor: note that flavor is a required property - flavor_changed = self._update_flavor(obj, new_profile) - network_created, network_deleted = self._update_network( - obj, new_profile) - - # TODO(Yanyan Hu): Update block_device properties - # Update server metadata - self._update_metadata(obj, new_profile) - - # start server if it was stopped as part of this update operation - if image_changed or flavor_changed or network_deleted: - cc = self.compute(obj) - try: - server = cc.server_get(obj.physical_id) - if server.status == consts.VS_SHUTOFF: - cc.server_start(obj.physical_id) - cc.wait_for_server(obj.physical_id, consts.VS_ACTIVE) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - - return True - - def do_get_details(self, obj): - known_keys = { - 'OS-DCF:diskConfig', - 'OS-EXT-AZ:availability_zone', - 'OS-EXT-STS:power_state', - 'OS-EXT-STS:vm_state', - 'accessIPv4', - 'accessIPv6', - 'config_drive', - 'created', - 'hostId', - 'id', - 'key_name', - 'locked', - 'metadata', - 'name', - 'os-extended-volumes:volumes_attached', - 'progress', - 'status', - 'updated' - } - if obj.physical_id is None or obj.physical_id == '': - return {} - - driver = self.compute(obj) - try: - server = driver.server_get(obj.physical_id) - except exc.InternalError as ex: - return { - 'Error': { - 'code': ex.code, - 'message': str(ex) - } - } - - if server is None: - return {} - server_data = server.to_dict() - if 'id' in server_data['image']: - image_id = server_data['image']['id'] - else: - image_id = server_data['image'] - attached_volumes = [] - if ('attached_volumes' in server_data and - len(server_data['attached_volumes']) > 0): - for volume in server_data['attached_volumes']: - attached_volumes.append(volume['id']) - details = { - 'image': image_id, - 'attached_volumes': attached_volumes, - 'flavor': self._get_flavor_id(obj, server_data), - } - for key in known_keys: - if key in server_data: - details[key] = server_data[key] - - # process special keys like 'OS-EXT-STS:task_state': these keys have - # a default value '-' when not existing - special_keys = [ - 'OS-EXT-STS:task_state', - 'OS-SRV-USG:launched_at', - 'OS-SRV-USG:terminated_at', - ] - for key in special_keys: - if key in server_data: - val = server_data[key] - details[key] = val if val else '-' - - # process network addresses - details['addresses'] = copy.deepcopy(server_data['addresses']) - - # process security groups - sgroups = [] - if 'security_groups' in server_data: - if server_data['security_groups'] is 
not None: - for sg in server_data['security_groups']: - sgroups.append(sg['name']) - # when we have multiple nics the info will include the - # security groups N times where N == number of nics. Be nice - # and only display it once. - sgroups = list(set(sgroups)) - if len(sgroups) == 0: - details['security_groups'] = '' - elif len(sgroups) == 1: - details['security_groups'] = sgroups[0] - else: - details['security_groups'] = sgroups - - return dict((k, details[k]) for k in sorted(details)) - - def _get_flavor_id(self, obj, server): - """Get flavor id. - - :param obj: The node object. - :param dict server: The server object. - :return: The flavor_id for the server. - """ - flavor = server['flavor'] - flavor_name = flavor.get('name') or flavor.get('original_name') - return self.compute(obj).flavor_find(flavor_name, False).id - - def _get_image_id(self, obj, server, op): - """Get image id. - - :param obj: The node object. - :param server: The server object. - :param op: The operate on the node. - :return: The image_id for the server. - """ - image_id = None - - if server.image: - image_id = server.image['id'] or server.image - # when booting a nova server from volume, the image property - # can be ignored. - # we try to find a volume which is bootable and use its image_id - # for the server. - elif server.attached_volumes: - cinder_driver = self.block_storage(obj) - for volume_ids in server.attached_volumes: - try: - vs = cinder_driver.volume_get(volume_ids['id']) - if vs.is_bootable: - image_id = vs.volume_image_metadata['image_id'] - except exc.InternalError as ex: - raise exc.EResourceOperation(op=op, type='server', - id=obj.physical_id, - message=str(ex)) - else: - msg = _("server doesn't have an image and it has no " - "bootable volume") - raise exc.EResourceOperation(op=op, type="server", - id=obj.physical_id, - message=msg) - return image_id - - def _handle_generic_op(self, obj, driver_func_name, - op_name, expected_server_status=None, - **kwargs): - """Generic handler for standard server operations.""" - if not obj.physical_id: - return False - server_id = obj.physical_id - nova_driver = self.compute(obj) - - try: - driver_func = getattr(nova_driver, driver_func_name) - driver_func(server_id, **kwargs) - if expected_server_status: - nova_driver.wait_for_server(server_id, expected_server_status) - return True - except exc.InternalError as ex: - raise exc.EResourceOperation(op=op_name, type='server', - id=server_id, - message=str(ex)) - - def do_adopt(self, obj, overrides=None, snapshot=False): - """Adopt an existing server node for management. - - :param obj: A node object for this operation. It could be a puppet - node that provides only 'user', 'project' and 'physical_id' - properties when doing a preview. It can be a real Node object for - node adoption. - :param overrides: A dict containing the properties that will be - overridden when generating a profile for the server. - :param snapshot: A boolean flag indicating whether the profile should - attempt a snapshot operation before adopting the server. If set to - True, the ID of the snapshot will be used as the image ID. - - :returns: A dict containing the spec created from the server object or - a dict containing error information if failure occurred. - """ - driver = self.compute(obj) - - # TODO(Qiming): Add snapshot support - # snapshot = driver.snapshot_create(...) 
- - error = {} - try: - server = driver.server_get(obj.physical_id) - except exc.InternalError as ex: - error = {'code': ex.code, 'message': str(ex)} - - if error: - return {'Error': error} - - spec = {} - # Context? - # TODO(Qiming): Need to fetch admin password from a different API - spec[self.AUTO_DISK_CONFIG] = server.disk_config == 'AUTO' - - spec[self.AVAILABILITY_ZONE] = server.availability_zone - - # TODO(Anyone): verify if this needs a format conversion - bdm = server.block_device_mapping or [] - spec[self.BLOCK_DEVICE_MAPPING_V2] = bdm - - spec[self.CONFIG_DRIVE] = server.has_config_drive or False - spec[self.FLAVOR] = server.flavor['id'] - spec[self.IMAGE] = self._get_image_id(obj, server, 'adopting') - spec[self.KEY_NAME] = server.key_name - - # metadata - metadata = server.metadata or {} - metadata.pop('cluster_id', None) - metadata.pop('cluster_node_id', None) - metadata.pop('cluster_node_index', None) - spec[self.METADATA] = metadata - - # name - spec[self.NAME] = server.name - - networks = server.addresses - net_list = [] - for network, interfaces in networks.items(): - for intf in interfaces: - ip_type = intf.get('OS-EXT-IPS:type') - net = {self.NETWORK: network} - if ip_type == 'fixed' and net not in net_list: - net_list.append({self.NETWORK: network}) - - spec[self.NETWORKS] = net_list - # NOTE: the personality attribute is missing for ever. - spec[self.SECURITY_GROUPS] = [ - sg['name'] for sg in server.security_groups - ] - # TODO(Qiming): get server user_data and parse it. - # Note: user_data is returned in 2.3 microversion API, in a different - # property name. - # spec[self.USER_DATA] = server.user_data - - if overrides: - spec.update(overrides) - - return spec - - def do_join(self, obj, cluster_id): - if not obj.physical_id: - return False - - driver = self.compute(obj) - try: - metadata = {} - metadata['cluster_id'] = cluster_id - metadata['cluster_node_index'] = str(obj.index) - driver.server_metadata_update(obj.physical_id, metadata) - except exc.InternalError as ex: - raise exc.EResourceUpdate(type='server', id=obj.physical_id, - message=str(ex)) - return super(ServerProfile, self).do_join(obj, cluster_id) - - def do_leave(self, obj): - if not obj.physical_id: - return False - - keys = ['cluster_id', 'cluster_node_index'] - try: - self.compute(obj).server_metadata_delete(obj.physical_id, keys) - except exc.InternalError as ex: - raise exc.EResourceDeletion(type='server', id=obj.physical_id, - message=str(ex)) - return super(ServerProfile, self).do_leave(obj) - - def do_check(self, obj): - if not obj.physical_id: - return False - - try: - server = self.compute(obj).server_get(obj.physical_id) - except exc.InternalError as ex: - if ex.code == 404: - raise exc.EServerNotFound(type='server', - id=obj.physical_id, - message=str(ex)) - else: - raise exc.EResourceOperation(op='checking', type='server', - id=obj.physical_id, - message=str(ex)) - - if server is None or server.status != consts.VS_ACTIVE: - return False - - return True - - def do_healthcheck(self, obj, health_check_type): - """Healthcheck operation. - - This method checks if a node is healthy. If health check type is - NODE_STATUS_POLLING it will check the server status. If health check - type is HYPERVISOR_STATUS_POLLING it will check the hypervisor state - and status. - - :param obj: The node object to operate on. - :param health_check_type: The type of health check. Either - NODE_STATUS_POLLING or HYPERVISOR_STATUS_POLLING. 
- :return status: True indicates node is healthy, False indicates - it is unhealthy. - """ - - if not obj.physical_id: - if obj.status == 'BUILD' or obj.status == 'CREATING': - return True - - LOG.info('%s for %s: server has no physical ID.', - consts.POLL_STATUS_FAIL, obj.name) - return False - - try: - server = self.compute(obj).server_get(obj.physical_id) - except Exception as ex: - if isinstance(ex, exc.InternalError) and ex.code == 404: - # treat resource not found exception as unhealthy - LOG.info('%s for %s: server was not found.', - consts.POLL_STATUS_FAIL, obj.name) - return False - else: - # treat all other exceptions as healthy - LOG.info( - '%s for %s: Exception when trying to get server info but ' - 'ignoring this error: %s.', - consts.POLL_STATUS_PASS, obj.name, ex.message) - return True - - if server is None: - # no server information is available, treat the node as healthy - LOG.info( - '%s for %s: No server information was returned but ignoring ' - 'this error.', - consts.POLL_STATUS_PASS, obj.name) - return True - - if health_check_type == consts.NODE_STATUS_POLLING: - return self._do_healthcheck_server(obj, server) - elif health_check_type == consts.HYPERVISOR_STATUS_POLLING: - return self._do_healthcheck_hypervisor(obj, server) - else: - LOG.info('%s for %s: ignoring invalid health check type %s', - consts.POLL_STATUS_PASS, obj.name, health_check_type) - return True - - def _do_healthcheck_server(self, obj, server): - """Healthcheck operation based on server. - - This method checks if a server node is healthy by getting the server - status from nova. A server is considered unhealthy if it does not - exist or its status is one of the following: - - ERROR - - SHUTOFF - - DELETED - - :param obj: The node object to operate on. - :param server: The server object associated with the node. - :return status: True indicates node is healthy, False indicates - it is unhealthy. - """ - - unhealthy_server_status = [consts.VS_ERROR, consts.VS_SHUTOFF, - consts.VS_DELETED] - - if server.status in unhealthy_server_status: - LOG.info('%s for %s: server status is unhealthy.', - consts.POLL_STATUS_FAIL, obj.name) - return False - - LOG.info('%s for %s', consts.POLL_STATUS_PASS, obj.name) - return True - - def _do_healthcheck_hypervisor(self, obj, server): - """Healthcheck operation based on hypervisor. - - This method checks if a server node is healthy by getting the - hypervisor state and status from nova. A server is considered - unhealthy if the hypervisor it is running on has a state that is down - or a status that is disabled. - - :param obj: The node object to operate on. - :param server: The server object associated with the node. - :return status: True indicates node is healthy, False indicates - it is unhealthy. 
- """ - - if server.hypervisor_hostname != "": - try: - hv = self.compute(obj).hypervisor_find( - server.hypervisor_hostname) - except Exception as ex: - if isinstance(ex, exc.InternalError) and ex.code == 404: - # treat resource not found exception as unhealthy - LOG.info('%s for %s: hypervisor %s was not found.', - consts.POLL_STATUS_FAIL, obj.name, - server.hypervisor_hostname) - return False - else: - # treat all other exceptions as healthy - LOG.info( - '%s for %s: Exception when trying to get hypervisor ' - 'info for %s, but ignoring this error: %s.', - consts.POLL_STATUS_PASS, obj.name, - server.hypervisor_hostname, ex.message) - return True - - if hv is None: - # no hypervisor information is available, treat the node as - # healthy - LOG.info( - '%s for %s: No hypervisor information was returned but ' - 'ignoring this error.', - consts.POLL_STATUS_PASS, obj.name) - return True - - if hv.state == 'down': - LOG.info('%s for %s: server status is unhealthy because ' - 'hypervisor %s state is down', - consts.POLL_STATUS_FAIL, obj.name, - server.hypervisor_hostname) - return False - - if hv.status == 'disabled': - LOG.info('%s for %s: server status is unhealthy because ' - 'hypervisor %s status is disabled', - consts.POLL_STATUS_FAIL, obj.name, - server.hypervisor_hostname) - return False - - LOG.info('%s for %s', consts.POLL_STATUS_PASS, obj.name) - return True - - def do_recover(self, obj, **options): - """Handler for recover operation. - - :param obj: The node object. - :param dict options: A list for operations each of which has a name - and optionally a map from parameter to values. - :return id: New id of the recovered resource or None if recovery - failed. - :return status: True indicates successful recovery, False indicates - failure. - """ - - # default is recreate if not specified - if 'operation' not in options or not options['operation']: - options['operation'] = consts.RECOVER_RECREATE - - operation = options.get('operation') - - if operation.upper() not in consts.RECOVERY_ACTIONS: - LOG.error("The operation '%s' is not supported", - operation) - return obj.physical_id, False - - op_params = options.get('operation_params', {}) - if operation.upper() == consts.RECOVER_REBOOT: - # default to hard reboot if operation_params was not specified - if not isinstance(op_params, dict): - op_params = {} - if consts.REBOOT_TYPE not in op_params.keys(): - op_params[consts.REBOOT_TYPE] = consts.REBOOT_HARD - - if operation.upper() == consts.RECOVER_RECREATE: - # recreate is implemented in base class - return super(ServerProfile, self).do_recover(obj, **options) - else: - method = getattr(self, "handle_" + operation.lower()) - return method(obj, **op_params) - - def handle_reboot(self, obj, **options): - """Handler for the reboot operation.""" - if not obj.physical_id: - return None, False - - server_id = obj.physical_id - reboot_type = options.get(consts.REBOOT_TYPE, consts.REBOOT_SOFT) - if (not isinstance(reboot_type, str) or - reboot_type.upper() not in consts.REBOOT_TYPES): - return server_id, False - - nova_driver = self.compute(obj) - try: - server = nova_driver.server_get(server_id) - if server is None: - return None, False - nova_driver.server_reboot(server_id, reboot_type) - nova_driver.wait_for_server(obj.physical_id, - consts.VS_ACTIVE) - return server_id, True - except exc.InternalError as ex: - raise exc.EResourceOperation(op='rebooting', type='server', - id=server_id, - message=str(ex)) - - def handle_rebuild(self, obj, **options): - """Handler for the rebuild operation. 
- - :param obj: The node object. - :param dict options: A list for operations each of which has a name - and optionally a map from parameter to values. - :return id: New id of the recovered resource or None if recovery - failed. - :return status: True indicates successful recovery, False indicates - failure. - """ - if not obj.physical_id: - return None, False - - server_id = obj.physical_id - nova_driver = self.compute(obj) - try: - server = nova_driver.server_get(server_id) - except exc.InternalError as ex: - raise exc.EResourceOperation(op='rebuilding', type='server', - id=server_id, - message=str(ex)) - - if server is None: - return None, False - image_id = options.get(self.IMAGE, None) - if not image_id: - image_id = self._get_image_id(obj, server, 'rebuilding') - - admin_pass = self.properties.get(self.ADMIN_PASS) - name = self.properties[self.NAME] or obj.name - try: - nova_driver.server_rebuild(server_id, image_id, - name, admin_pass) - nova_driver.wait_for_server(server_id, consts.VS_ACTIVE) - return server_id, True - except exc.InternalError as ex: - raise exc.EResourceOperation(op='rebuilding', type='server', - id=server_id, - message=str(ex)) - - def handle_change_password(self, obj, **options): - """Handler for the change_password operation.""" - password = options.get(self.ADMIN_PASSWORD, None) - if (password is None or not isinstance(password, str)): - return False - return self._handle_generic_op(obj, 'server_change_password', - 'change_password', - new_password=password) - - def handle_suspend(self, obj): - """Handler for the suspend operation.""" - return self._handle_generic_op(obj, 'server_suspend', - 'suspend', consts.VS_SUSPENDED) - - def handle_resume(self, obj): - """Handler for the resume operation.""" - return self._handle_generic_op(obj, 'server_resume', - 'resume', consts.VS_ACTIVE) - - def handle_start(self, obj): - """Handler for the start operation.""" - return self._handle_generic_op(obj, 'server_start', - 'start', consts.VS_ACTIVE) - - def handle_stop(self, obj): - """Handler for the stop operation.""" - return self._handle_generic_op(obj, 'server_stop', - 'stop', consts.VS_SHUTOFF) - - def handle_lock(self, obj): - """Handler for the lock operation.""" - return self._handle_generic_op(obj, 'server_lock', - 'lock') - - def handle_unlock(self, obj): - """Handler for the unlock operation.""" - return self._handle_generic_op(obj, 'server_unlock', - 'unlock') - - def handle_pause(self, obj): - """Handler for the pause operation.""" - return self._handle_generic_op(obj, 'server_pause', - 'pause', consts.VS_PAUSED) - - def handle_unpause(self, obj): - """Handler for the unpause operation.""" - return self._handle_generic_op(obj, 'server_unpause', - 'unpause', consts.VS_ACTIVE) - - def handle_rescue(self, obj, **options): - """Handler for the rescue operation.""" - password = options.get(self.ADMIN_PASSWORD, None) - image = options.get(self.IMAGE, None) - if not image: - return False - - self._validate_image(obj, image) - return self._handle_generic_op(obj, 'server_rescue', - 'rescue', consts.VS_RESCUE, - admin_pass=password, - image_ref=image) - - def handle_unrescue(self, obj): - """Handler for the unrescue operation.""" - return self._handle_generic_op(obj, 'server_unrescue', - 'unrescue', consts.VS_ACTIVE) - - def handle_migrate(self, obj): - """Handler for the migrate operation.""" - return self._handle_generic_op(obj, 'server_migrate', - 'migrate', consts.VS_ACTIVE) - - def handle_snapshot(self, obj): - """Handler for the snapshot operation.""" - return 
self._handle_generic_op(obj, 'server_create_image',
-                                       'snapshot', consts.VS_ACTIVE,
-                                       name=obj.name)
-
-    def handle_restore(self, obj, **options):
-        """Handler for the restore operation."""
-        image = options.get(self.IMAGE, None)
-        if not image:
-            return False
-        return self.handle_rebuild(obj, **options)
diff --git a/senlin/rpc/__init__.py b/senlin/rpc/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/senlin/rpc/client.py b/senlin/rpc/client.py
deleted file mode 100644
index 3b02d69e5..000000000
--- a/senlin/rpc/client.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Client side of the senlin engine RPC API.
-"""
-
-from oslo_config import cfg
-
-from senlin.common import consts
-from senlin.common import messaging
-from senlin.objects import base as object_base
-
-_CLIENT = None
-
-
-def get_engine_client():
-    global _CLIENT
-    if not _CLIENT:
-        _CLIENT = EngineClient()
-    return _CLIENT
-
-
-class EngineClient(object):
-    """Client side of the senlin engine RPC API.
-
-    Version History:
-
-    1.0 - Initial version (Mitaka 1.0 release)
-    1.1 - Add cluster-collect call.
-    """
-
-    def __init__(self):
-        serializer = object_base.VersionedObjectSerializer()
-        self._client = messaging.get_rpc_client(consts.CONDUCTOR_TOPIC,
-                                                cfg.CONF.host,
-                                                serializer=serializer)
-
-    @staticmethod
-    def make_msg(method, **kwargs):
-        return method, kwargs
-
-    def call(self, ctxt, method, req, version=None):
-        """The main entry point for invoking the engine service.
-
-        :param ctxt: The request context object.
-        :param method: The name of the method to be invoked.
-        :param req: A dict containing a request object.
-        :param version: The engine RPC API version requested.
-        """
-        if version is not None:
-            client = self._client.prepare(version=version)
-        else:
-            client = self._client
-
-        return client.call(ctxt, method, req=req)
-
-    def cast(self, ctxt, msg, version=None):
-        method, kwargs = msg
-        if version is not None:
-            client = self._client.prepare(version=version)
-        else:
-            client = self._client
-        return client.cast(ctxt, method, **kwargs)
diff --git a/senlin/tests/__init__.py b/senlin/tests/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/senlin/tests/drivers/__init__.py b/senlin/tests/drivers/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/senlin/tests/drivers/os_test/README.rst b/senlin/tests/drivers/os_test/README.rst
deleted file mode 100644
index ab112c03e..000000000
--- a/senlin/tests/drivers/os_test/README.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-OpenStack Test Driver
-=====================
-
-This is a fake driver for Senlin testing. All interactions between Senlin
-and backend OpenStack services, such as Nova and Heat, are simulated using
-this driver. With it, the Senlin API and engine workflows can be easily
-tested without setting up the backend services.
- -Configure the following option in senlin.conf to enable this driver: - - `cloud_backend = openstack_test` diff --git a/senlin/tests/drivers/os_test/__init__.py b/senlin/tests/drivers/os_test/__init__.py deleted file mode 100644 index 00bce3346..000000000 --- a/senlin/tests/drivers/os_test/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from senlin.tests.drivers.os_test import cinder_v2 -from senlin.tests.drivers.os_test import glance_v2 -from senlin.tests.drivers.os_test import heat_v1 -from senlin.tests.drivers.os_test import keystone_v3 -from senlin.tests.drivers.os_test import lbaas -from senlin.tests.drivers.os_test import mistral_v2 -from senlin.tests.drivers.os_test import neutron_v2 -from senlin.tests.drivers.os_test import nova_v2 -from senlin.tests.drivers.os_test import octavia_v2 -from senlin.tests.drivers.os_test import zaqar_v2 - - -block_storage = cinder_v2.CinderClient -compute = nova_v2.NovaClient -glance = glance_v2.GlanceClient -identity = keystone_v3.KeystoneClient -loadbalancing = lbaas.LoadBalancerDriver -message = zaqar_v2.ZaqarClient -network = neutron_v2.NeutronClient -octavia = octavia_v2.OctaviaClient -orchestration = heat_v1.HeatClient -workflow = mistral_v2.MistralClient diff --git a/senlin/tests/drivers/os_test/cinder_v2.py b/senlin/tests/drivers/os_test/cinder_v2.py deleted file mode 100644 index d5e3b032e..000000000 --- a/senlin/tests/drivers/os_test/cinder_v2.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from senlin.drivers import base
-from senlin.drivers import sdk
-
-
-class CinderClient(base.DriverBase):
-    """Fake Cinder V2 driver for test."""
-    def __init__(self, ctx):
-        self.fake_volume_create = {
-            "id": "3095aefc-09fb-4bc7-b1f0-f21a304e864c",
-            "size": 2,
-            "links": [
-                {
-                    "href": " ",
-                    "rel": "self"
-                }
-            ]
-        }
-
-        self.fake_volume_get = {
-            "status": "available",
-            "attachments": [],
-            "links": [
-                {
-                    "href": " ",
-                    "rel": "self"
-                },
-                {
-                    "href": " ",
-                    "rel": "bookmark"
-                }
-            ],
-            "availability_zone": "nova",
-            "bootable": "false",
-            "os-vol-host-attr:host": "ip-10-168-107-25",
-            "source_volid": "",
-            "snapshot_id": "",
-            "id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
-            "description": "Super volume.",
-            "name": "vol-002",
-            "created_at": "2013-02-25T02:40:21.000000",
-            "volume_type": "None",
-            "os-vol-tenant-attr:tenant_id": "0c2eba2c5af04d3f9e9d0d410b371fde",
-            "size": 1,
-            "os-volume-replication:driver_data": "",
-            "os-volume-replication:extended_status": "",
-            "metadata": {
-                "contents": "not junk"
-            }
-        }
-
-        self.fake_snapshot_create = {
-            "name": "snap-001",
-            "description": "Daily backup",
-            "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
-            "force": True
-        }
-
-        self.fake_snapshot_get = {
-            "status": "available",
-            "os-extended-snapshot-attributes:progress": "100%",
-            "description": "Daily backup",
-            "created_at": "2013-02-25T04:13:17.000000",
-            "metadata": {},
-            "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
-            "os-extended-snapshot-attributes:project_id":
-                "0c2eba2c5af04d3f9e9d0d410b371fde",
-            "size": 1,
-            "id": "2bb856e1-b3d8-4432-a858-09e4ce939389",
-            "name": "snap-001"
-        }
-
-    def volume_create(self, **params):
-        return sdk.FakeResourceObject(self.fake_volume_create)
-
-    def volume_get(self, volume_id):
-        return sdk.FakeResourceObject(self.fake_volume_get)
-
-    def volume_delete(self, volume_id, ignore_missing=True):
-        return
-
-    def snapshot_create(self, **params):
-        return sdk.FakeResourceObject(self.fake_snapshot_create)
-
-    def snapshot_get(self, snapshot_id):
-        return sdk.FakeResourceObject(self.fake_snapshot_get)
-
-    def snapshot_delete(self, snapshot_id, ignore_missing=True):
-        return
diff --git a/senlin/tests/drivers/os_test/glance_v2.py b/senlin/tests/drivers/os_test/glance_v2.py
deleted file mode 100644
index bf9102a06..000000000
--- a/senlin/tests/drivers/os_test/glance_v2.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
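Within the Senlin tree, a test could exercise the fake Cinder client removed above as follows (illustrative only; the fake ignores its context argument and always hands back the canned dicts)::

    # Round trip through the fake Cinder driver, as a Senlin test might.
    client = CinderClient(ctx=None)

    vol = client.volume_create(size=2, name="vol-002")
    print(vol.id)                 # canned id from fake_volume_create

    detail = client.volume_get(vol.id)
    print(detail.status)          # "available", from fake_volume_get

    client.volume_delete(vol.id)  # a no-op in the fake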
- -from senlin.drivers import base -from senlin.drivers import sdk - - -class GlanceClient(base.DriverBase): - """Fake Glance V2 driver.""" - - def __init__(self, ctx): - self.fake_image = { - "created": "2015-01-01T01:02:03Z", - "id": "70a599e0-31e7-49b7-b260-868f441e862b", - "links": [], - "metadata": { - "architecture": "x86_64", - "auto_disk_config": "True", - "kernel_id": "nokernel", - "ramdisk_id": "nokernel" - }, - "minDisk": 0, - "minRam": 0, - "name": "cirros-0.3.5-x86_64-disk", - "progress": 100, - "status": "ACTIVE", - "updated": "2011-01-01T01:02:03Z" - } - - def image_find(self, name_or_id, ignore_missing=False): - return sdk.FakeResourceObject(self.fake_image) diff --git a/senlin/tests/drivers/os_test/heat_v1.py b/senlin/tests/drivers/os_test/heat_v1.py deleted file mode 100644 index c96cf5a25..000000000 --- a/senlin/tests/drivers/os_test/heat_v1.py +++ /dev/null @@ -1,85 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.drivers import base -from senlin.drivers import sdk - - -class HeatClient(base.DriverBase): - """Heat V1 driver.""" - - def __init__(self, params): - super(HeatClient, self).__init__(params) - self.fake_stack_create = { - "id": "3095aefc-09fb-4bc7-b1f0-f21a304e864c", - "links": [ - { - "href": " ", - "rel": "self" - } - ] - } - - self.fake_stack_get = { - "capabilities": [], - "creation_time": "2014-06-03T20:59:46Z", - "description": "sample stack", - "disable_rollback": True, - "id": "3095aefc-09fb-4bc7-b1f0-f21a304e864c", - "links": [ - { - "href": " ", - "rel": "self" - } - ], - "notification_topics": [], - "outputs": [], - "parameters": { - "OS::project_id": "3ab5b02f-a01f-4f95-afa1-e254afc4a435", - "OS::stack_id": "3095aefc-09fb-4bc7-b1f0-f21a304e864c", - "OS::stack_name": "simple_stack" - }, - "stack_name": "simple_stack", - "stack_owner": "simple_username", - "stack_status": "CREATE_COMPLETE", - "stack_status_reason": "Stack CREATE completed successfully", - "template_description": "sample stack", - "stack_user_project_id": "65728b74-cfe7-4f17-9c15-11d4f686e591", - "timeout_mins": "", - "updated_time": "", - "parent": "", - "tags": "", - "status": "CREATE_COMPLETE" - } - - def stack_create(self, **params): - return sdk.FakeResourceObject(self.fake_stack_create) - - def stack_get(self, stack_id): - return sdk.FakeResourceObject(self.fake_stack_get) - - def stack_find(self, name_or_id): - return sdk.FakeResourceObject(self.fake_stack_get) - - def stack_update(self, stack_id, **params): - self.fake_stack_get["status"] = "UPDATE_COMPLETE" - return sdk.FakeResourceObject(self.fake_stack_get) - - def stack_delete(self, stack_id, ignore_missing=True): - return - - def wait_for_stack(self, stack_id, status, failures=None, interval=2, - timeout=None): - return - - def wait_for_stack_delete(self, stack_id, timeout=None): - return diff --git a/senlin/tests/drivers/os_test/keystone_v3.py b/senlin/tests/drivers/os_test/keystone_v3.py deleted file mode 100644 index 433ccae96..000000000 --- a/senlin/tests/drivers/os_test/keystone_v3.py +++ /dev/null @@ 
-1,156 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log - -from senlin.drivers import base -from senlin.drivers import sdk - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -class KeystoneClient(base.DriverBase): - """Keystone V3 driver.""" - - def __init__(self, params): - super(KeystoneClient, self).__init__(params) - self.conn = sdk.create_connection(params) - self.session = self.conn.session - - @sdk.translate_exception - def trust_get_by_trustor(self, trustor, trustee=None, project=None): - """Get trust by trustor. - - Note we cannot provide two or more filters to keystone due to - constraints in keystone implementation. We do additional filtering - after the results are returned. - - :param trustor: ID of the trustor; - :param trustee: ID of the trustee; - :param project: ID of the project to which the trust is scoped. - :returns: The trust object or None if no matching trust is found. - """ - filters = {'trustor_user_id': trustor} - - trusts = [t for t in self.conn.identity.trusts(**filters)] - - for trust in trusts: - if (trustee and trust.trustee_user_id != trustee): - continue - - if (project and trust.project_id != project): - continue - - return trust - - return None - - @sdk.translate_exception - def trust_create(self, trustor, trustee, project, roles=None, - impersonation=True): - """Create trust between two users. - - :param trustor: ID of the user who is the trustor. - :param trustee: ID of the user who is the trustee. - :param project: Scope of the trust which is a project ID. - :param roles: List of roles the trustee will inherit from the trustor. - :param impersonation: Whether the trustee is allowed to impersonate - the trustor. - """ - - if roles: - role_list = [{'name': role} for role in roles] - else: - role_list = [] - params = { - 'trustor_user_id': trustor, - 'trustee_user_id': trustee, - 'project_id': project, - 'impersonation': impersonation, - 'allow_redelegation': True, - 'roles': role_list - } - - result = self.conn.identity.create_trust(**params) - - return result - - @classmethod - @sdk.translate_exception - def get_token(cls, **creds): - """Get token using given credential""" - - access_info = sdk.authenticate(**creds) - return access_info['token'] - - @classmethod - @sdk.translate_exception - def get_user_id(cls, **creds): - """Get ID of the user with given credential""" - - access_info = sdk.authenticate(**creds) - return access_info['user_id'] - - @classmethod - def get_service_credentials(cls, **kwargs): - """Senlin service credential to use with Keystone. - - :param kwargs: An additional keyword argument list that can be used - for customizing the default settings. 
- """ - - creds = { - 'auth_url': CONF.authentication.auth_url, - 'username': CONF.authentication.service_username, - 'password': CONF.authentication.service_password, - 'project_name': CONF.authentication.service_project_name, - 'user_domain_name': cfg.CONF.authentication.service_user_domain, - 'project_domain_name': - cfg.CONF.authentication.service_project_domain, - 'verify': cfg.CONF.authentication.verify_ssl, - 'interface': cfg.CONF.authentication.interface, - } - creds.update(**kwargs) - return creds - - @sdk.translate_exception - def validate_regions(self, regions): - """Check whether the given regions are valid. - - :param regions: A list of regions for validation. - :returns: A list of regions that are found available on keystone. - """ - region_list = self.conn.identity.regions() - known = [r['id'] for r in region_list] - - validated = [] - for r in regions: - if r in known: - validated.append(r) - else: - LOG.warning('Region %s is not found.', r) - - return validated - - @sdk.translate_exception - def get_senlin_endpoint(self): - """Get Senlin service endpoint.""" - region = cfg.CONF.default_region_name - # TODO(Yanyan Hu): Currently, region filtering is unsupported in - # session.get_endpoint(). Need to propose fix to openstacksdk. - base = self.conn.session.get_endpoint(service_type='clustering', - interface='public', - region=region) - - return base diff --git a/senlin/tests/drivers/os_test/lbaas.py b/senlin/tests/drivers/os_test/lbaas.py deleted file mode 100644 index 14caad65c..000000000 --- a/senlin/tests/drivers/os_test/lbaas.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.drivers import base - - -class LoadBalancerDriver(base.DriverBase): - def __init__(self, params): - self.lb_result = { - "loadbalancer": "a36c20d0-18e9-42ce-88fd-82a35977ee8c", - "vip_address": "192.168.1.100", - "listener": "35cb8516-1173-4035-8dae-0dae3453f37f", - "pool": "4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5", - "healthmonitor": "0a9ac99d-0a09-4b18-8499-a0796850279a" - } - - self.member_id = "9a7aff27-fd41-4ec1-ba4c-3eb92c629313" - - def lb_create(self, vip, pool, cluster_name, hm=None, az=None, - flavor_id=None): - return True, self.lb_result - - def lb_delete(self, **kwargs): - return True, 'LB deletion succeeded' - - def member_add(self, node, lb_id, pool_id, port, subnet): - return self.member_id - - def member_remove(self, lb_id, pool_id, member_id): - return True diff --git a/senlin/tests/drivers/os_test/mistral_v2.py b/senlin/tests/drivers/os_test/mistral_v2.py deleted file mode 100644 index d4e5a98f4..000000000 --- a/senlin/tests/drivers/os_test/mistral_v2.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.drivers import base -from senlin.drivers import sdk - - -class MistralClient(base.DriverBase): - """Fake Mistral V2 driver.""" - - def __init__(self, params): - self.fake_workflow = {} - self.fake_execution = {} - - def workflow_create(self, definition, scope): - return sdk.FakeResourceObject(self.fake_workflow) - - def workflow_delete(self, workflow, ignore_missing=True): - return None - - def workflow_find(self, name_or_id, ignore_missing=True): - return sdk.FakeResourceObject(self.fake_workflow) - - def execution_create(self, name, inputs): - return sdk.FakeResourceObject(self.fake_execution) - - def execution_delete(self, execution, ignore_missing=True): - return None - - def wait_for_execution(self, execution, status='SUCCESS', - failures=['ERROR'], interval=2, - timeout=None): - return None diff --git a/senlin/tests/drivers/os_test/neutron_v2.py b/senlin/tests/drivers/os_test/neutron_v2.py deleted file mode 100644 index bc91a793d..000000000 --- a/senlin/tests/drivers/os_test/neutron_v2.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from senlin.drivers import base -from senlin.drivers import sdk - - -class NeutronClient(base.DriverBase): - """Fake Neutron V2 driver for test.""" - - def __init__(self, ctx): - self.fake_network = { - "status": "ACTIVE", - "subnets": [ - "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" - ], - "name": "private-network", - "router:external": False, - "admin_state_up": True, - "tenant_id": "4fd44f30292945e481c7b8a0c8908869", - "mtu": 0, - "shared": True, - "port_security_enabled": True, - "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" - } - self.fake_port = { - "ip_address": "10.0.1.10", - "fixed_ips": [ - "172.17.1.129" - ], - "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", - "status": "ACTIVE", - "subnet_id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b", - "id": "60f65938-3ebb-451d-a3a3-a0918d345469", - "security_group_ids": [ - "45aa2abc-47f0-4008-8d67-606b41cabb7a" - ] - } - self.fake_subnet = { - "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", - "subnet_pool_id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b", - "id": "60f65938-3ebb-451d-a3a3-a0918d345469" - } - - def network_get(self, value, ignore_missing=False): - return sdk.FakeResourceObject(self.fake_network) - - def port_create(self, **attr): - return sdk.FakeResourceObject(self.fake_port) - - def port_delete(self, port, ignore_missing=True): - return None - - def subnet_get(self, name_or_id, ignore_missing=False): - return sdk.FakeResourceObject(self.fake_subnet) diff --git a/senlin/tests/drivers/os_test/nova_v2.py b/senlin/tests/drivers/os_test/nova_v2.py deleted file mode 100644 index fb9394e82..000000000 --- a/senlin/tests/drivers/os_test/nova_v2.py +++ /dev/null @@ -1,280 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
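The fake Nova client below lets tests simulate slow server builds: a `simulated_wait_time` entry in the server metadata is recorded at create time and slept on in the wait calls. A self-contained sketch of that pattern (class and method names invented for illustration)::

    import time
    import uuid


    class FakeCompute(object):
        """Illustration of the simulated-wait pattern used below."""

        def __init__(self):
            # Map of server id -> seconds to sleep in wait_for_server().
            self.simulated_waits = {}

        def server_create(self, **attrs):
            server_id = str(uuid.uuid4())
            wait = attrs.get('metadata', {}).get('simulated_wait_time')
            if isinstance(wait, int) and wait > 0:
                self.simulated_waits[server_id] = wait
            return server_id

        def wait_for_server(self, server_id):
            # A real driver would poll Nova; the fake just sleeps so
            # timing-sensitive engine paths can be exercised in tests.
            time.sleep(self.simulated_waits.get(server_id, 0))


    compute = FakeCompute()
    sid = compute.server_create(metadata={'simulated_wait_time': 1})
    compute.wait_for_server(sid)  # sleeps for about one second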
- -import copy -import time - -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.drivers import base -from senlin.drivers import sdk - - -class NovaClient(base.DriverBase): - """Fake Nova V2 driver for test.""" - - def __init__(self, ctx): - self.fake_flavor = { - "is_disabled": False, - "disk": 1, - "OS-FLV-EXT-DATA:ephemeral": 0, - "os-flavor-access:is_public": True, - "id": "1", - "links": [], - "name": "m1.tiny", - "ram": 512, - "swap": "", - "vcpus": 1, - } - - self.fake_image = { - "created": "2015-01-01T01:02:03Z", - "id": "70a599e0-31e7-49b7-b260-868f441e862b", - "links": [], - "metadata": { - "architecture": "x86_64", - "auto_disk_config": "True", - "kernel_id": "nokernel", - "ramdisk_id": "nokernel" - }, - "minDisk": 0, - "minRam": 0, - "name": "cirros-0.3.5-x86_64-disk", - "progress": 100, - "status": "ACTIVE", - "updated": "2011-01-01T01:02:03Z" - } - - self.fake_server_create = { - "id": "893c7791-f1df-4c3d-8383-3caae9656c62", - "availability_zone": "Zone1", - "name": "new-server-test", - "imageRef": "http://localhost/openstack/images/test-image", - "flavorRef": "http://localhost/openstack/flavors/1", - "metadata": { - "My Server Name": "Apache1" - }, - "personality": [ - { - "path": "/etc/banner.txt", - "contents": "personality-content" - } - ], - "block_device_mapping_v2": [ - { - "device_name": "/dev/sdb1", - "source_type": "blank", - "destination_type": "local", - "delete_on_termination": "True", - "guest_format": "swap", - "boot_index": "-1" - }, - { - "device_name": "/dev/sda1", - "source_type": "volume", - "destination_type": "volume", - "uuid": "fake-volume-id-1", - "boot_index": "0" - } - ] - } - - self.fake_server_get = { - # Note: The name of some attrs are defined as following to keep - # compatible with the resource definition in openstacksdk. But - # the real name of these attrs returned by Nova API could be - # different, e.g. the name of 'access_ipv4' attribute is actually - # 'accessIPv4' in server_get API response. 
- "id": "893c7791-f1df-4c3d-8383-3caae9656c62", - "name": "new-server-test", - "availability_zone": "ZONE1", - "access_ipv4": "192.168.0.3", - "access_ipv6": "fe80::ac0e:2aff:fe87:5911", - "addresses": { - "private": [ - { - "addr": "192.168.0.3", - "version": 4 - } - ] - }, - "created_at": "2015-08-18T21:11:09Z", - "updated_at": "2012-08-20T21:11:09Z", - "flavor": { - "id": "1", - "links": [] - }, - "host_id": "65201c14a29663e06d0748e561207d998b343", - "image": { - "id": "FAKE_IMAGE_ID", - "links": [] - }, - "links": [], - "metadata": { - "My Server Name": "Apache1" - }, - "progress": 0, - "status": "ACTIVE", - "project_id": "openstack", - "user_id": "fake" - } - - self.fake_service_list = [ - { - 'id': 'IDENTIFIER1', - 'binary': 'nova-api', - 'host': 'host1', - 'status': 'enabled', - 'state': 'up', - 'zone': 'nova' - }, - { - 'id': 'IDENTIFIER2', - 'binary': 'nova-compute', - 'host': 'host1', - 'status': 'enabled', - 'state': 'up', - 'zone': 'nova' - }, - ] - - self.keypair = { - 'public_key': 'blahblah', - 'type': 'ssh', - 'name': 'oskey', - 'fingerprint': 'not-real', - } - - self.availability_zone = { - 'zoneState': { - 'available': True - }, - 'hosts': None, - 'zoneName': 'nova', - } - - self.simulated_waits = {} - - def flavor_find(self, name_or_id, ignore_missing=False): - return sdk.FakeResourceObject(self.fake_flavor) - - def flavor_list(self, details=True, **query): - return [sdk.FakeResourceObject(self.fake_flavor)] - - def image_find(self, name_or_id, ignore_missing=False): - return sdk.FakeResourceObject(self.fake_image) - - def image_list(self, details=True, **query): - return [sdk.FakeResourceObject(self.fake_image)] - - def keypair_list(self, details=True, **query): - return [sdk.FakeResourceObject(self.fake_keypair)] - - def keypair_find(self, name_or_id, ignore_missing=False): - return sdk.FakeResourceObject(self.keypair) - - def server_create(self, **attrs): - server_id = uuidutils.generate_uuid() - self.fake_server_create['id'] = server_id - self.fake_server_get['id'] = server_id - - # save simulated wait time if it was set in metadata - if ('metadata' in attrs and - 'simulated_wait_time' in attrs['metadata']): - simulated_wait = attrs['metadata']['simulated_wait_time'] - if (isinstance(simulated_wait, int) and simulated_wait > 0): - self.simulated_waits[server_id] = simulated_wait - - return sdk.FakeResourceObject(self.fake_server_create) - - def server_get(self, server): - return sdk.FakeResourceObject(self.fake_server_get) - - def wait_for_server(self, server, status=consts.VS_ACTIVE, - failures=None, - interval=2, timeout=None): - # sleep for simulated wait time if it was supplied during server_create - if server in self.simulated_waits: - time.sleep(self.simulated_waits[server]) - return - - def wait_for_server_delete(self, server, timeout=None): - # sleep for simulated wait time if it was supplied during server_create - if server in self.simulated_waits: - time.sleep(self.simulated_waits[server]) - del self.simulated_waits[server] - return - - def server_update(self, server, **attrs): - self.fake_server_get.update(attrs) - return sdk.FakeResourceObject(self.fake_server_get) - - def server_rebuild(self, server, imageref, name=None, admin_password=None, - **attrs): - if imageref: - attrs['image'] = {'id': imageref} - if name: - attrs['name'] = name - if admin_password: - attrs['adminPass'] = admin_password - self.fake_server_get.update(attrs) - - return sdk.FakeResourceObject(self.fake_server_get) - - def server_resize(self, server, flavor): - 
self.fake_server_get['flavor'].update({'id': flavor}) - - def server_resize_confirm(self, server): - return - - def server_resize_revert(self, server): - return - - def server_reboot(self, server, reboot_type): - return - - def server_delete(self, server, ignore_missing=True): - return - - def server_stop(self, server): - return - - def server_force_delete(self, server, ignore_missing=True): - return - - def server_metadata_get(self, server): - return {} - - def server_metadata_update(self, server, metadata): - new_server = copy.deepcopy(self.fake_server_get) - new_server['metadata'] = metadata - server = sdk.FakeResourceObject(new_server) - return server - - def server_metadata_delete(self, server, keys): - return - - def service_list(self): - return sdk.FakeResourceObject(self.fake_service_list) - - def service_force_down(self, service, host, binary): - return - - def service_enable(self, service, host, binary): - return - - def service_disable(self, service, host, binary): - return - - def availability_zone_list(self, **query): - return [sdk.FakeResourceObject(self.availability_zone)] diff --git a/senlin/tests/drivers/os_test/octavia_v2.py b/senlin/tests/drivers/os_test/octavia_v2.py deleted file mode 100644 index d5eb76ada..000000000 --- a/senlin/tests/drivers/os_test/octavia_v2.py +++ /dev/null @@ -1,224 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
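The fake Octavia client below follows the same convention as the other fakes: each create call overlays the caller-supplied values onto a canned response dict and wraps the result. A compact, self-contained sketch of that pattern (names trimmed for illustration)::

    from types import SimpleNamespace


    class FakeOctavia(object):
        """Sketch of the canned-response pattern used by the fake below."""

        def __init__(self):
            # Canned load balancer response, trimmed for illustration.
            self.fake_lb = {
                "id": "607226db-27ef-4d41-ae89-f2a800e9c2db",
                "name": "bestapplb1",
                "provisioning_status": "ACTIVE",
            }

        def loadbalancer_create(self, name=None, **kwargs):
            # Overlay caller-supplied values onto the canned dict before
            # wrapping it, so later *_get calls observe the same data.
            if name:
                self.fake_lb["name"] = name
            return SimpleNamespace(**self.fake_lb)


    lb = FakeOctavia().loadbalancer_create(name="web-lb")
    assert lb.provisioning_status == "ACTIVE"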
- -from senlin.drivers import base -from senlin.drivers import sdk - -FAKE_LB_ID = "607226db-27ef-4d41-ae89-f2a800e9c2db" -FAKE_LISTENER_ID = "023f2e34-7806-443b-bfae-16c324569a3d" -FAKE_HM_ID = "8ed3c5ac-6efa-420c-bedb-99ba14e58db5" -FAKE_MEMBER_ID = "957a1ace-1bd2-449b-8455-820b6e4b63f3" -FAKE_POOL_ID = "4029d267-3983-4224-a3d0-afb3fe16a2cd" -FAKE_PROJECT_ID = "e3cd678b11784734bc366148aa37580e" -FAKE_SUBNET_ID = "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa" - - -class OctaviaClient(base.DriverBase): - """Fake octavia V2 driver for test.""" - - def __init__(self, ctx): - self.fake_lb = { - "admin_state_up": True, - "availability_zone": "test_az", - "description": "Best App load balancer 1", - "id": FAKE_LB_ID, - "listeners": [{"id": FAKE_LISTENER_ID}], - "name": "bestapplb1", - "operating_status": "ONLINE", - "pools": [], - "project_id": FAKE_PROJECT_ID, - "provider": "octavia", - "provisioning_status": "ACTIVE", - "vip_address": "203.0.113.10", - "vip_port_id": "1e20d91d-8df9-4c15-9778-28bc89226c19", - "vip_subnet_id": "08dce793-daef-411d-a896-d389cd45b1ea", - "vip_network_id": "e2de51e5-f10a-40f3-8f5c-7bab784b1380", - } - - self.fake_listener = { - "admin_state_up": True, - "connection_limit": 200, - "created_at": "2017-02-28T00:42:44", - "description": "A great TLS listener", - "default_pool_id": FAKE_POOL_ID, - "default_tls_container_ref": "http://fake_url", - "description": "A great TLS listener", - "id": FAKE_LISTENER_ID, - "insert_headers": { - "X-Forwarded-For": "true", - "X-Forwarded-Port": "true" - }, - "l7policies": [{"id": "5e618272-339d-4a80-8d14-dbc093091bb1"}], - "loadbalancers": [{"id": FAKE_LB_ID}], - "name": "great_tls_listener", - "operating_status": "ONLINE", - "project_id": FAKE_PROJECT_ID, - "protocol": "TERMINATED_HTTPS", - "protocol_port": 443, - "provisioning_status": "ACTIVE", - "sni_container_refs": [ - "http://loc1", "http://loca2" - ], - "updated_at": "2017-02-28T00:44:30" - } - - self.fake_pool = { - "admin_state_up": True, - "created_at": "2017-05-10T18:14:44", - "description": "Super Round Robin Pool", - "healthmonitor_id": FAKE_HM_ID, - "id": FAKE_POOL_ID, - "lb_algorithm": "ROUND_ROBIN", - "listeners": [{"id": FAKE_LISTENER_ID}], - "loadbalancers": [{"id": FAKE_LB_ID}], - "members": [], - "name": "super-pool", - "operating_status": "ONLINE", - "project_id": FAKE_PROJECT_ID, - "protocol": "HTTP", - "provisioning_status": "ACTIVE", - "session_persistence": { - "cookie_name": "ChocolateChip", - "type": "APP_COOKIE" - }, - "updated_at": "2017-05-10T23:08:12" - } - - self.fake_member = { - "address": "192.0.2.16", - "admin_state_up": True, - "created_at": "2017-05-11T17:21:34", - "id": FAKE_MEMBER_ID, - "monitor_address": None, - "monitor_port": 8080, - "name": "web-server-1", - "operating_status": "NO_MONITOR", - "project_id": FAKE_PROJECT_ID, - "protocol_port": 80, - "provisioning_status": "ACTIVE", - "subnet_id": FAKE_SUBNET_ID, - "updated_at": "2017-05-11T17:21:37", - "weight": 20, - } - - self.fake_hm = { - "admin_state_up": True, - "created_at": "2017-05-11T23:53:47", - "delay": 10, - "expected_codes": 200, - "http_method": "GET", - "id": FAKE_HM_ID, - "max_retries": 1, - "max_retries_down": 3, - "name": "super-pool-health-monitor", - "operating_status": "ONLINE", - "pools": [{"id": FAKE_POOL_ID}], - "project_id": FAKE_PROJECT_ID, - "provisioning_status": "ACTIVE", - "timeout": 5, - "type": "HTTP", - "updated_at": "2017-05-11T23:53:47", - "url_path": "/" - } - - def loadbalancer_create(self, vip_subnet_id=None, vip_network_id=None, - vip_address=None, 
admin_state_up=True, name=None, - description=None): - self.fake_lb["vip_subnet_id"] = vip_subnet_id - self.fake_lb["vip_network_id"] = vip_network_id - self.fake_lb["admin_state_up"] = admin_state_up - if vip_address: - self.fake_lb["vip_address"] = vip_address - if name: - self.fake_lb["name"] = name - if description: - self.fake_lb["description"] = description - return sdk.FakeResourceObject(self.fake_lb) - - def loadbalancer_delete(self, lb_id, ignore_missing=True): - return - - def loadbalancer_get(self, name_or_id, ignore_missing=True, - show_deleted=False): - if name_or_id in (self.fake_lb["id"], self.fake_lb["name"]): - return sdk.FakeResourceObject(self.fake_lb) - return None - - def listener_create(self, loadbalancer_id, protocol, protocol_port, - connection_limit=None, admin_state_up=True, - name=None, description=None): - self.fake_listener["loadbalancers"] = [{"id": loadbalancer_id}] - self.fake_listener["protocol"] = protocol - self.fake_listener["protocol_port"] = protocol_port - self.fake_listener["admin_state_up"] = admin_state_up - if connection_limit: - self.fake_listener["connection_limit"] = connection_limit - if name: - self.fake_listener["name"] = name - if description: - self.fake_listener["description"] = description - - return sdk.FakeResourceObject(self.fake_listener) - - def listener_delete(self, listener_id, ignore_missing=True): - return - - def pool_create(self, lb_algorithm, listener_id, protocol, - admin_state_up=True, name=None, description=None): - self.fake_pool["lb_algorithm"] = lb_algorithm - self.fake_pool["listeners"] = [{"id": listener_id}] - self.fake_pool["protocol"] = protocol - self.fake_pool["admin_state_up"] = admin_state_up - if name: - self.fake_pool["name"] = name - if description: - self.fake_pool["description"] = description - return sdk.FakeResourceObject(self.fake_pool) - - def pool_delete(self, pool_id, ignore_missing=True): - return - - def pool_member_create(self, name, pool_id, address, protocol_port, - subnet_id, weight=None, admin_state_up=True): - # pool_id is ignored - self.fake_member["name"] = name - self.fake_member["address"] = address - self.fake_member["protocol_port"] = protocol_port - self.fake_member["subnet_id"] = subnet_id - self.fake_member["admin_state_up"] = admin_state_up - if weight: - self.fake_member["weight"] = weight - return sdk.FakeResourceObject(self.fake_member) - - def pool_member_delete(self, pool_id, member_id, ignore_missing=True): - return - - def healthmonitor_create(self, hm_type, delay, timeout, max_retries, - pool_id, admin_state_up=True, http_method=None, - url_path=None, expected_codes=None): - self.fake_hm["type"] = hm_type - self.fake_hm["delay"] = delay - self.fake_hm["timeout"] = timeout - self.fake_hm["max_retries"] = max_retries - self.fake_hm["pools"] = [{"id": pool_id}] - self.fake_hm["admin_state_up"] = admin_state_up - if http_method: - self.fake_hm["http_method"] = http_method - if url_path: - self.fake_hm["url_path"] = url_path - if expected_codes: - self.fake_hm["expected_codes"] = expected_codes - - return sdk.FakeResourceObject(self.fake_hm) - - def healthmonitor_delete(self, hm_id, ignore_missing=True): - return diff --git a/senlin/tests/drivers/os_test/zaqar_v2.py b/senlin/tests/drivers/os_test/zaqar_v2.py deleted file mode 100644 index 221f828ad..000000000 --- a/senlin/tests/drivers/os_test/zaqar_v2.py +++ /dev/null @@ -1,74 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.drivers import base -from senlin.drivers import sdk - -FAKE_SUBSCRIPTION_ID = "0d8dbb71-1538-42ac-99fb-bb52d0ad1b6f" -FAKE_MESSAGE_ID = "51db6f78c508f17ddc924357" -FAKE_CLAIM_ID = "51db7067821e727dc24df754" - - -class ZaqarClient(base.DriverBase): - """Fake zaqar V2 driver for test.""" - - def __init__(self, ctx): - self.fake_subscription = { - "subscription_id": FAKE_SUBSCRIPTION_ID - } - self.fake_claim = { - "messages": [ - { - "body": { - "event": "BackupStarted" - }, - "age": 239, - "href": "/v2/queues/demoqueue/messages/" + - FAKE_MESSAGE_ID + "?claim_id=" + FAKE_CLAIM_ID, - "ttl": 300 - } - ] - } - self.fake_message = { - "resources": [ - "/v2/queues/demoqueue/messages/" + FAKE_MESSAGE_ID - ] - } - - def queue_create(self, **attrs): - return - - def queue_exists(self, queue_name): - return True - - def queue_delete(self, queue, ignore_missing=True): - return None - - def subscription_create(self, queue_name, **attrs): - return sdk.FakeResourceObject(self.fake_subscription) - - def subscription_delete(self, queue_name, subscription, - ignore_missing=True): - return None - - def claim_create(self, queue_name, **attrs): - return sdk.FakeResourceObject(self.fake_claim) - - def claim_delete(self, queue_name, claim, ignore_missing=True): - return None - - def message_delete(self, queue_name, message, claim_id=None, - ignore_missing=True): - return None - - def message_post(self, queue_name, message): - return sdk.FakeResourceObject(self.fake_message) diff --git a/senlin/tests/unit/__init__.py b/senlin/tests/unit/__init__.py deleted file mode 100644 index 01be0697e..000000000 --- a/senlin/tests/unit/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet - -from senlin import objects - -eventlet.monkey_patch(os=False) - -# The following has to be done after eventlet monkey patching or else the -# threading.local() store used in oslo_messaging will be initialized to -# thread-local storage rather than green-thread local. This will cause context -# sets and deletes in that storage to clobber each other. -# Make sure we have all objects loaded. This is done at module import time, -# because we may be using mock decorators in our tests that run at import -# time. 
-objects.register_all() diff --git a/senlin/tests/unit/api/__init__.py b/senlin/tests/unit/api/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/api/common/__init__.py b/senlin/tests/unit/api/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/api/common/test_serializers.py b/senlin/tests/unit/api/common/test_serializers.py deleted file mode 100644 index a2e3c7eb3..000000000 --- a/senlin/tests/unit/api/common/test_serializers.py +++ /dev/null @@ -1,201 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils as tu -import webob - -from senlin.api.common import serializers -from senlin.api.common import wsgi -from senlin.common import exception -from senlin.tests.unit.common import base - - -class JSONRequestDeserializerTest(base.SenlinTestCase): - - def test_has_body_no_content_length(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = encodeutils.safe_encode('asdf') - request.headers.pop('Content-Length') - request.headers['Content-Type'] = 'application/json' - obj = serializers.JSONRequestDeserializer() - self.assertFalse(obj.has_body(request)) - - def test_has_body_zero_content_length(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = encodeutils.safe_encode('asdf') - request.headers['Content-Length'] = 0 - request.headers['Content-Type'] = 'application/json' - obj = serializers.JSONRequestDeserializer() - self.assertFalse(obj.has_body(request)) - - def test_has_body_has_content_length_no_content_type(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = encodeutils.safe_encode('{"key": "value"}') - self.assertIn('Content-Length', request.headers) - obj = serializers.JSONRequestDeserializer() - self.assertTrue(obj.has_body(request)) - - def test_has_body_has_content_length_plain_content_type(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = encodeutils.safe_encode('{"key": "value"}') - self.assertIn('Content-Length', request.headers) - request.headers['Content-Type'] = 'text/plain' - obj = serializers.JSONRequestDeserializer() - self.assertTrue(obj.has_body(request)) - - def test_has_body_has_content_type_malformed(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = encodeutils.safe_encode('asdf') - self.assertIn('Content-Length', request.headers) - request.headers['Content-Type'] = 'application/json' - obj = serializers.JSONRequestDeserializer() - self.assertFalse(obj.has_body(request)) - - def test_has_body_has_content_type(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = encodeutils.safe_encode('{"key": "value"}') - self.assertIn('Content-Length', request.headers) - request.headers['Content-Type'] = 'application/json' - obj = 
serializers.JSONRequestDeserializer() - self.assertTrue(obj.has_body(request)) - - def test_has_body_has_wrong_content_type(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = encodeutils.safe_encode('{"key": "value"}') - self.assertIn('Content-Length', request.headers) - request.headers['Content-Type'] = 'application/xml' - obj = serializers.JSONRequestDeserializer() - self.assertFalse(obj.has_body(request)) - - def test_has_body_has_aws_content_type_only(self): - request = wsgi.Request.blank('/?ContentType=JSON') - request.method = 'GET' - request.body = encodeutils.safe_encode('{"key": "value"}') - self.assertIn('Content-Length', request.headers) - obj = serializers.JSONRequestDeserializer() - self.assertTrue(obj.has_body(request)) - - def test_has_body_content_type_with_get(self): - request = wsgi.Request.blank('/') - request.method = 'GET' - request.body = encodeutils.safe_encode('{"key": "value"}') - self.assertIn('Content-Length', request.headers) - obj = serializers.JSONRequestDeserializer() - self.assertTrue(obj.has_body(request)) - - def test_no_body_no_content_length(self): - request = wsgi.Request.blank('/') - obj = serializers.JSONRequestDeserializer() - self.assertFalse(obj.has_body(request)) - - def test_from_json(self): - fixture = '{"key": "value"}' - expected = {"key": "value"} - actual = serializers.JSONRequestDeserializer().from_json(fixture) - self.assertEqual(expected, actual) - - def test_from_json_malformed(self): - fixture = 'kjasdklfjsklajf' - self.assertRaises(webob.exc.HTTPBadRequest, - serializers.JSONRequestDeserializer().from_json, - fixture) - - def test_default_no_body(self): - request = wsgi.Request.blank('/') - actual = serializers.JSONRequestDeserializer().default(request) - expected = {} - self.assertEqual(expected, actual) - - def test_default_with_body(self): - request = wsgi.Request.blank('/') - request.method = 'POST' - request.body = encodeutils.safe_encode('{"key": "value"}') - actual = serializers.JSONRequestDeserializer().default(request) - expected = {"body": {"key": "value"}} - self.assertEqual(expected, actual) - - def test_default_with_get_with_body(self): - request = wsgi.Request.blank('/') - request.method = 'GET' - request.body = encodeutils.safe_encode('{"key": "value"}') - actual = serializers.JSONRequestDeserializer().default(request) - expected = {"body": {"key": "value"}} - self.assertEqual(expected, actual) - - def test_default_with_get_with_body_with_aws(self): - request = wsgi.Request.blank('/?ContentType=JSON') - request.method = 'GET' - request.body = encodeutils.safe_encode('{"key": "value"}') - actual = serializers.JSONRequestDeserializer().default(request) - expected = {"body": {"key": "value"}} - self.assertEqual(expected, actual) - - def test_from_json_exceeds_max_json_mb(self): - cfg.CONF.set_override('max_json_body_size', 10, group='senlin_api') - body = jsonutils.dumps(['a'] * cfg.CONF.senlin_api.max_json_body_size) - self.assertGreater(len(body), cfg.CONF.senlin_api.max_json_body_size) - obj = serializers.JSONRequestDeserializer() - error = self.assertRaises(exception.RequestLimitExceeded, - obj.from_json, - body) - msg = ('Request limit exceeded: JSON body size ' - '(%s bytes) exceeds maximum allowed size (%s bytes).' 
- ) % (len(body), cfg.CONF.senlin_api.max_json_body_size) - self.assertEqual(msg, str(error)) - - -class JSONResponseSerializerTest(base.SenlinTestCase): - - def test_to_json(self): - fixture = {"key": "value"} - expected = '{"key": "value"}' - actual = serializers.JSONResponseSerializer().to_json(fixture) - self.assertEqual(expected, actual) - - def test_to_json_with_date_format_value(self): - test_date = tu.parse_strtime("0001-03-08T02:00:00", - '%Y-%m-%dT%H:%M:%S') - fixture = {"date": test_date} - expected = '{"date": "0001-03-08T02:00:00"}' - actual = serializers.JSONResponseSerializer().to_json(fixture) - self.assertEqual(expected, actual) - - def test_to_json_with_more_deep_format(self): - val = complex(1, 2) - fixture = {"is_public": True, "v": val} - expected = '{"is_public": true, "v": "(1+2j)"}' - actual = serializers.JSONResponseSerializer().to_json(fixture) - self.assertEqual(expected, actual) - - def test_default(self): - fixture = {"key": "value"} - response = webob.Response() - serializers.JSONResponseSerializer().default(response, fixture) - self.assertEqual(200, response.status_int) - content_types = [h for h in response.headerlist - if h[0] == 'Content-Type'] - # NOTE: filter returns a iterator in python 3. - types = [t for t in content_types] - self.assertEqual(1, len(types)) - self.assertEqual('application/json', response.content_type) - self.assertEqual('{"key": "value"}', - encodeutils.safe_decode(response.body)) diff --git a/senlin/tests/unit/api/common/test_util.py b/senlin/tests/unit/api/common/test_util.py deleted file mode 100644 index 5484c3cac..000000000 --- a/senlin/tests/unit/api/common/test_util.py +++ /dev/null @@ -1,230 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
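Among the helpers exercised in the file below, `parse_bool_param` accepts only literal spellings of true and false. A behavioral sketch consistent with those tests, raising ValueError where the real helper raises an HTTP 400 error::

    def parse_bool_param(name, value):
        """Strictly parse a boolean query parameter.

        Only 'true'/'false' in any letter case (or the bool itself) is
        accepted; anything else, including 'yes', '1' or None, is
        rejected, matching the expectations in TestParseBool below.
        """
        if str(value).lower() not in ('true', 'false'):
            raise ValueError("Invalid value '%s' specified for '%s'"
                             % (value, name))
        return str(value).lower() == 'true'


    assert parse_bool_param('param', 'TRUE') is True
    assert parse_bool_param('param', False) is False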
- -import jsonschema -from unittest import mock - -from webob import exc - -from senlin.api.common import util -from senlin.api.common import wsgi -from senlin.common import context -from senlin.common import policy -from senlin.objects import base as obj_base -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class FakeRequest(obj_base.SenlinObject): - - VERSION = '2.0' - VERSION_MAP = { - '1.3': '2.0' - } - - @classmethod - def obj_from_primitive(cls, primitive): - pass - - -class TestGetAllowedParams(base.SenlinTestCase): - - def setUp(self): - super(TestGetAllowedParams, self).setUp() - req = wsgi.Request({}) - self.params = req.params.copy() - self.params.add('foo', 'foo value') - self.whitelist = {'foo': 'single'} - - def test_returns_empty_dict(self): - self.whitelist = {} - - result = util.get_allowed_params(self.params, self.whitelist) - self.assertEqual({}, result) - - def test_only_adds_whitelisted_params_if_param_exists(self): - self.whitelist = {'foo': 'single'} - self.params.clear() - - result = util.get_allowed_params(self.params, self.whitelist) - self.assertNotIn('foo', result) - - def test_returns_only_whitelisted_params(self): - self.params.add('bar', 'bar value') - - result = util.get_allowed_params(self.params, self.whitelist) - self.assertIn('foo', result) - self.assertNotIn('bar', result) - - def test_handles_single_value_params(self): - result = util.get_allowed_params(self.params, self.whitelist) - self.assertEqual('foo value', result['foo']) - - def test_handles_multiple_value_params(self): - self.whitelist = {'foo': 'multi'} - self.params.add('foo', 'foo value 2') - - result = util.get_allowed_params(self.params, self.whitelist) - self.assertEqual(2, len(result['foo'])) - self.assertIn('foo value', result['foo']) - self.assertIn('foo value 2', result['foo']) - - def test_handles_mixed_value_param_with_multiple_entries(self): - self.whitelist = {'foo': 'mixed'} - self.params.add('foo', 'foo value 2') - - result = util.get_allowed_params(self.params, self.whitelist) - self.assertEqual(2, len(result['foo'])) - self.assertIn('foo value', result['foo']) - self.assertIn('foo value 2', result['foo']) - - def test_handles_mixed_value_param_with_single_entry(self): - self.whitelist = {'foo': 'mixed'} - - result = util.get_allowed_params(self.params, self.whitelist) - self.assertEqual(['foo value'], result['foo']) - - def test_ignores_bogus_whitelist_items(self): - self.whitelist = {'foo': 'blah'} - result = util.get_allowed_params(self.params, self.whitelist) - self.assertNotIn('foo', result) - - -class TestPolicyEnforce(base.SenlinTestCase): - - def setUp(self): - super(TestPolicyEnforce, self).setUp() - self.req = wsgi.Request({}) - self.req.context = context.RequestContext(project='foo', - is_admin=False) - - class DummyController(object): - REQUEST_SCOPE = 'test' - - @util.policy_enforce - def an_action(self, req): - return 'woot' - - self.controller = DummyController() - - @mock.patch.object(policy, 'enforce') - def test_policy_enforce_policy_deny(self, mock_enforce): - mock_enforce.return_value = False - - self.assertRaises(exc.HTTPForbidden, - self.controller.an_action, - self.req, tenant_id='foo') - - -class TestParseRequest(base.SenlinTestCase): - - def setUp(self): - super(TestParseRequest, self).setUp() - self.context = utils.dummy_context() - - def test_all_okay(self): - name = 'ClusterListRequest' - body = {'project_safe': True} - req = mock.Mock(context=self.context) - - res = util.parse_request(name, req, body) - - 
self.assertIsNotNone(res) - - def test_bad_request_name(self): - name = 'BadClusterListRequest' - body = {'project_safe': True} - req = mock.Mock(context=self.context) - - ex = self.assertRaises(exc.HTTPBadRequest, - util.parse_request, - name, req, body) - - self.assertEqual('Unsupported object type BadClusterListRequest', - str(ex)) - - def test_bad_request_body(self): - name = 'ClusterCreateRequest' - body = {'bad_key': 'bad_value'} - req = mock.Mock(context=self.context) - - ex = self.assertRaises(exc.HTTPBadRequest, - util.parse_request, - name, req, body, 'cluster') - - self.assertEqual("Request body missing 'cluster' key.", - str(ex)) - - def test_bad_primitive(self): - name = 'ClusterListRequest' - body = {'limit': -1} - req = mock.Mock(context=self.context) - - ex = self.assertRaises(exc.HTTPBadRequest, - util.parse_request, - name, req, body) - - self.assertEqual("Value must be >= 0 for field 'limit'.", - str(ex)) - - def test_bad_schema(self): - name = 'ClusterListRequest' - body = {'bogus_key': 'bogus_value', - 'project_safe': True} - req = mock.Mock(context=self.context) - - ex = self.assertRaises(exc.HTTPBadRequest, - util.parse_request, - name, req, body) - - self.assertEqual("Additional properties are not allowed ('bogus_key' " - "was unexpected)", str(ex)) - - @mock.patch.object(jsonschema, 'validate') - @mock.patch.object(FakeRequest, 'obj_from_primitive') - @mock.patch.object(obj_base.SenlinObject, 'obj_class_from_name') - def test_version_conversion(self, mock_cls, mock_construct, mock_validate): - name = 'FakeReq' - body = {} - mock_cls.return_value = FakeRequest - # The following context will force the request to be downgraded to - # its base version (1.0) - context = utils.dummy_context(api_version='1.2') - req = mock.Mock(context=context) - obj = mock.Mock() - mock_construct.return_value = obj - primitive = { - 'senlin_object.version': '2.0', - 'senlin_object.name': 'FakeReq', - 'senlin_object.data': {}, - 'senlin_object.namespace': 'senlin' - } - - res = util.parse_request(name, req, body) - - self.assertIsNotNone(res) - mock_cls.assert_called_once_with('FakeReq') - self.assertEqual(2, mock_construct.call_count) - obj.obj_make_compatible.assert_called_once_with(primitive, '1.0') - - -class TestParseBool(base.SenlinTestCase): - - def test_parse_bool(self): - name = 'param' - for value in ('True', 'true', 'TRUE', True): - self.assertTrue(util.parse_bool_param(name, value)) - for value in ('False', 'false', 'FALSE', False): - self.assertFalse(util.parse_bool_param(name, value)) - for value in ('foo', 't', 'f', 'yes', 'no', 'y', 'n', '1', '0', None): - self.assertRaises(exc.HTTPBadRequest, - util.parse_bool_param, name, value) diff --git a/senlin/tests/unit/api/common/test_version_request.py b/senlin/tests/unit/api/common/test_version_request.py deleted file mode 100644 index f6b1acc77..000000000 --- a/senlin/tests/unit/api/common/test_version_request.py +++ /dev/null @@ -1,114 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
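The version-request tests below pin down both the accepted version grammar (exactly 'major.minor', no leading zeros, no stray whitespace) and the ordering rules, including a null version that sorts below any real one. A condensed sketch consistent with those tests, raising ValueError where the real class raises InvalidAPIVersionString::

    import functools
    import re


    @functools.total_ordering
    class APIVersionRequest(object):
        """Sketch of the semantics pinned down by the tests below."""

        _FORMAT = re.compile(r'^([1-9]\d*)\.(0|[1-9]\d*)$')

        def __init__(self, version_string=None):
            self.major = 0
            self.minor = 0
            if version_string is not None:
                match = self._FORMAT.match(version_string)
                if not match:
                    raise ValueError('Invalid API version string %r'
                                     % version_string)
                self.major = int(match.group(1))
                self.minor = int(match.group(2))

        def is_null(self):
            # The null (default) request compares below any real version.
            return (self.major, self.minor) == (0, 0)

        def _key(self):
            return (self.major, self.minor)

        def __eq__(self, other):
            return self._key() == other._key()

        def __lt__(self, other):
            if not isinstance(other, APIVersionRequest):
                raise TypeError
            return self._key() < other._key()

        def __str__(self):
            return '%d.%d' % self._key()

        def matches(self, min_ver, max_ver):
            # A null maximum means "no upper bound", as the tests expect.
            if self.is_null():
                raise ValueError('Null version cannot be matched.')
            if max_ver.is_null():
                return min_ver._key() <= self._key()
            return min_ver._key() <= self._key() <= max_ver._key()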
- -from senlin.api.common import version_request as vr -from senlin.common import exception -from senlin.tests.unit.common import base - - -class APIVersionRequestTests(base.SenlinTestCase): - - def test_valid_version_strings(self): - def _test_string(version, exp_major, exp_minor): - v = vr.APIVersionRequest(version) - self.assertEqual(v.major, exp_major) - self.assertEqual(v.minor, exp_minor) - - _test_string("1.1", 1, 1) - _test_string("2.10", 2, 10) - _test_string("5.234", 5, 234) - _test_string("12.5", 12, 5) - _test_string("2.0", 2, 0) - _test_string("2.200", 2, 200) - - def test_null_version(self): - v = vr.APIVersionRequest() - self.assertTrue(v.is_null()) - - def test_invalid_version_strings(self): - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "2") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "200") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "2.1.4") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "200.23.66.3") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "5 .3") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "5. 3") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "5.03") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "02.1") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "2.001") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, " 2.1") - - self.assertRaises(exception.InvalidAPIVersionString, - vr.APIVersionRequest, "2.1 ") - - def test_version_comparisons(self): - vers1 = vr.APIVersionRequest("2.0") - vers2 = vr.APIVersionRequest("2.5") - vers3 = vr.APIVersionRequest("5.23") - vers4 = vr.APIVersionRequest("2.0") - v_null = vr.APIVersionRequest() - - self.assertLess(v_null, vers2) - self.assertLess(vers1, vers2) - self.assertLessEqual(vers1, vers2) - self.assertLessEqual(vers1, vers4) - self.assertGreater(vers2, v_null) - self.assertGreater(vers3, vers2) - self.assertGreaterEqual(vers1, vers4) - self.assertGreaterEqual(vers3, vers2) - self.assertNotEqual(vers1, vers2) - self.assertEqual(vers1, vers4) - self.assertNotEqual(vers1, v_null) - self.assertEqual(v_null, v_null) - self.assertRaises(TypeError, vers1.__lt__, "2.1") - - def test_version_matches(self): - vers1 = vr.APIVersionRequest("1.0") - vers2 = vr.APIVersionRequest("1.1") - vers3 = vr.APIVersionRequest("1.2") - vers4 = vr.APIVersionRequest("1.3") - v_null = vr.APIVersionRequest() - - self.assertTrue(vers2.matches(vers1, vers3)) - self.assertTrue(vers2.matches(vers1, vers4)) - self.assertTrue(vers2.matches(vers1, v_null)) - self.assertFalse(vers1.matches(vers2, vers3)) - self.assertFalse(vers1.matches(vers2, vers4)) - self.assertFalse(vers2.matches(vers4, vers1)) - - self.assertRaises(ValueError, v_null.matches, vers1, vers4) - - def test_as_string(self): - vers1_string = "3.23" - vers1 = vr.APIVersionRequest(vers1_string) - self.assertEqual(vers1_string, str(vers1)) diff --git a/senlin/tests/unit/api/common/test_wsgi.py b/senlin/tests/unit/api/common/test_wsgi.py deleted file mode 100644 index 54f09afcd..000000000 --- a/senlin/tests/unit/api/common/test_wsgi.py +++ /dev/null @@ -1,466 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in 
compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket -from unittest import mock - -import fixtures -from oslo_config import cfg -from oslo_utils import encodeutils -import webob - -from senlin.api.common import version_request as vr -from senlin.api.common import wsgi -from senlin.common import exception -from senlin.tests.unit.common import base - -CONF = cfg.CONF - - -class RequestTest(base.SenlinTestCase): - - def test_content_type_missing(self): - request = wsgi.Request.blank('/tests/123') - self.assertRaises(exception.InvalidContentType, - request.get_content_type, ('application/xml')) - - def test_content_type_unsupported(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "text/html" - self.assertRaises(exception.InvalidContentType, - request.get_content_type, ('application/xml')) - - def test_content_type_with_charset(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "application/json; charset=UTF-8" - result = request.get_content_type(('application/json')) - self.assertEqual("application/json", result) - - def test_content_type_from_accept_xml(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_accept_json(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_accept_xml_json(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml, application/json" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_from_accept_json_xml_quality(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = ("application/json; q=0.3, " - "application/xml; q=0.9") - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - def test_content_type_accept_default(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.best_match_content_type() - self.assertEqual("application/json", result) - - -class ResourceTest(base.SenlinTestCase): - - def test_get_action_args(self): - env = { - 'wsgiorg.routing_args': [ - None, - { - 'controller': None, - 'format': None, - 'action': 'update', - 'id': 12, - }, - ], - } - - expected = {'action': 'update', 'id': 12} - actual = wsgi.Resource(None).get_action_args(env) - - self.assertEqual(expected, actual) - - def test_get_action_args_invalid_index(self): - env = {'wsgiorg.routing_args': []} - expected = {} - actual = wsgi.Resource(None).get_action_args(env) - self.assertEqual(expected, actual) - - def test_get_action_args_del_controller_error(self): - actions = {'format': None, - 'action': 'update', - 'id': 12} - env = {'wsgiorg.routing_args': [None, actions]} - expected = {'action': 'update', 'id': 12} - actual = 
wsgi.Resource(None).get_action_args(env) - self.assertEqual(expected, actual) - - def test_get_action_args_del_format_error(self): - actions = {'action': 'update', 'id': 12} - env = {'wsgiorg.routing_args': [None, actions]} - expected = {'action': 'update', 'id': 12} - actual = wsgi.Resource(None).get_action_args(env) - self.assertEqual(expected, actual) - - def test_dispatch(self): - class Controller(object): - def index(self, shirt, pants=None): - return (shirt, pants) - - resource = wsgi.Resource(None) - actual = resource.dispatch(Controller(), 'index', 'on', pants='off') - expected = ('on', 'off') - self.assertEqual(expected, actual) - - def test_dispatch_default(self): - class Controller(object): - def default(self, shirt, pants=None): - return (shirt, pants) - - resource = wsgi.Resource(None) - actual = resource.dispatch(Controller(), 'index', 'on', pants='off') - expected = ('on', 'off') - self.assertEqual(expected, actual) - - def test_dispatch_no_default(self): - class Controller(object): - def show(self, shirt, pants=None): - return (shirt, pants) - - resource = wsgi.Resource(None) - self.assertRaises(AttributeError, resource.dispatch, Controller(), - 'index', 'on', pants='off') - - def test_resource_call_error_handle(self): - class Controller(object): - def delete(self, req, identity): - return (req, identity) - - actions = {'action': 'delete', 'id': 12, 'body': 'data'} - env = {'wsgiorg.routing_args': [None, actions]} - request = wsgi.Request.blank('/tests/123', environ=env) - request.body = encodeutils.safe_encode('{"foo" : "value"}') - resource = wsgi.Resource(Controller()) - - # The Resource does not throw webob.HTTPExceptions, since they - # would be considered responses by wsgi and the request flow would end. - # Instead, they are wrapped so they can reach the fault application, - # where they are converted to a JSON response. - e = self.assertRaises(exception.HTTPExceptionDisguise, - resource, request) - self.assertIsInstance(e.exc, webob.exc.HTTPBadRequest) - - @mock.patch.object(wsgi, 'translate_exception') - def test_resource_call_error_handle_localized(self, mock_translate): - class Controller(object): - def delete(self, req, identity): - return (req, identity) - - def fake_translate_exception(ex, locale): - return translated_ex - - mock_translate.side_effect = fake_translate_exception - actions = {'action': 'delete', 'id': 12, 'body': 'data'} - env = {'wsgiorg.routing_args': [None, actions]} - request = wsgi.Request.blank('/tests/123', environ=env) - request.body = encodeutils.safe_encode('{"foo" : "value"}') - message_es = "No Encontrado" - translated_ex = webob.exc.HTTPBadRequest(message_es) - - resource = wsgi.Resource(Controller()) - - e = self.assertRaises(exception.HTTPExceptionDisguise, - resource, request) - self.assertEqual(message_es, str(e.exc)) - - def test_resource_call_with_version_header(self): - class Controller(object): - def dance(self, req): - return {'foo': 'bar'} - - actions = {'action': 'dance'} - env = {'wsgiorg.routing_args': [None, actions]} - request = wsgi.Request.blank('/tests/123', environ=env) - request.version_request = vr.APIVersionRequest('1.0') - - resource = wsgi.Resource(Controller()) - resp = resource(request) - self.assertEqual('{"foo": "bar"}', encodeutils.safe_decode(resp.body)) - self.assertTrue(hasattr(resp, 'headers')) - expected = 'clustering 1.0' - self.assertEqual(expected, resp.headers['OpenStack-API-Version']) - self.assertEqual('OpenStack-API-Version', resp.headers['Vary']) - - -class ControllerTest(base.SenlinTestCase): 
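- # Sanity checks for the wsgi.Controller base class: stored options and - # the shared RPC client handle.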
- - @mock.patch('senlin.rpc.client.get_engine_client') - def test_init(self, mock_client): - x_client = mock.Mock() - mock_client.return_value = x_client - data = mock.Mock() - - c = wsgi.Controller(data) - - self.assertEqual(data, c.options) - self.assertEqual(x_client, c.rpc_client) - - def test_default(self): - data = mock.Mock() - c = wsgi.Controller(data) - - self.assertRaises(webob.exc.HTTPNotFound, c.default, mock.Mock()) - - -class ResourceExceptionHandlingTest(base.SenlinTestCase): - scenarios = [ - ('client_exceptions', dict( - exception=exception.NotAuthenticated, - exception_catch=exception.NotAuthenticated)), - ('webob_bad_request', dict( - exception=webob.exc.HTTPBadRequest, - exception_catch=exception.HTTPExceptionDisguise)), - ('webob_not_found', dict( - exception=webob.exc.HTTPNotFound, - exception_catch=exception.HTTPExceptionDisguise)), - ] - - def test_resource_client_exceptions_dont_log_error(self): - class Controller(object): - def __init__(self, exception_to_raise): - self.exception_to_raise = exception_to_raise - - def raise_exception(self, req, body): - raise self.exception_to_raise() - actions = {'action': 'raise_exception', 'body': 'data'} - env = {'wsgiorg.routing_args': [None, actions]} - request = wsgi.Request.blank('/tests/123', environ=env) - request.body = encodeutils.safe_encode('{"foo": "value"}') - resource = wsgi.Resource(Controller(self.exception)) - e = self.assertRaises(self.exception_catch, resource, request) - e = e.exc if hasattr(e, 'exc') else e - self.assertNotIn(str(e), self.LOG.output) - - -class GetSocketTestCase(base.SenlinTestCase): - - def setUp(self): - super(GetSocketTestCase, self).setUp() - self.useFixture(fixtures.MonkeyPatch( - "senlin.api.common.wsgi.get_bind_addr", - lambda x, y: ('192.168.0.13', 1234))) - addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)), - (2, 2, 17, '', ('192.168.0.13', 80)), - (2, 3, 0, '', ('192.168.0.13', 80))] - self.useFixture(fixtures.MonkeyPatch( - "senlin.api.common.wsgi.socket.getaddrinfo", - lambda *x: addr_info_list)) - self.useFixture(fixtures.MonkeyPatch( - "senlin.api.common.wsgi.time.time", - mock.Mock(side_effect=[0, 1, 5, 10, 20, 35]))) - wsgi.cfg.CONF.senlin_api.cert_file = '/etc/ssl/cert' - wsgi.cfg.CONF.senlin_api.key_file = '/etc/ssl/key' - wsgi.cfg.CONF.senlin_api.ca_file = '/etc/ssl/ca_cert' - wsgi.cfg.CONF.senlin_api.tcp_keepidle = 600 - - def test_correct_configure_socket(self): - mock_socket = mock.Mock() - self.useFixture(fixtures.MonkeyPatch( - 'senlin.api.common.wsgi.ssl.wrap_socket', - mock_socket)) - self.useFixture(fixtures.MonkeyPatch( - 'senlin.api.common.wsgi.eventlet.listen', - lambda *x, **y: mock_socket)) - server = wsgi.Server(name='senlin-api', conf=cfg.CONF.senlin_api) - server.default_port = 1234 - server.configure_socket() - self.assertIn(mock.call.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, 1), - mock_socket.mock_calls) - self.assertIn(mock.call.setsockopt(socket.SOL_SOCKET, - socket.SO_KEEPALIVE, 1), - mock_socket.mock_calls) - if hasattr(socket, 'TCP_KEEPIDLE'): - self.assertIn(mock.call().setsockopt( - socket.IPPROTO_TCP, - socket.TCP_KEEPIDLE, - wsgi.cfg.CONF.senlin_api.tcp_keepidle), mock_socket.mock_calls) - - def test_get_socket_without_all_ssl_reqs(self): - wsgi.cfg.CONF.senlin_api.key_file = None - self.assertRaises(RuntimeError, - wsgi.get_socket, wsgi.cfg.CONF.senlin_api, 1234) - - def test_get_socket_with_bind_problems(self): - self.useFixture(fixtures.MonkeyPatch( - 'senlin.api.common.wsgi.eventlet.listen', - mock.Mock(side_effect=( - 
[wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None])))) - self.useFixture(fixtures.MonkeyPatch( - 'senlin.api.common.wsgi.ssl.wrap_socket', - lambda *x, **y: None)) - - self.assertRaises(RuntimeError, - wsgi.get_socket, wsgi.cfg.CONF.senlin_api, 1234) - - def test_get_socket_with_unexpected_socket_errno(self): - self.useFixture(fixtures.MonkeyPatch( - 'senlin.api.common.wsgi.eventlet.listen', - mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM)))) - self.useFixture(fixtures.MonkeyPatch( - 'senlin.api.common.wsgi.ssl.wrap_socket', - lambda *x, **y: None)) - self.assertRaises(wsgi.socket.error, wsgi.get_socket, - wsgi.cfg.CONF.senlin_api, 1234) - - def test_run_server_with_unexpected_socket_errno(self): - mock_server = mock.Mock(side_effect=wsgi.socket.error()) - mock_server.side_effect.errno = socket.errno.ENOMEM - self.useFixture(fixtures.MonkeyPatch( - 'senlin.api.common.wsgi.eventlet.wsgi.server', mock_server)) - - wsgi.cfg.CONF.senlin_api.workers = 1 - wsgi.cfg.CONF.debug = False - server = wsgi.Server(name='senlin-api', conf=cfg.CONF.senlin_api) - server.sock = mock.Mock() - server.application = mock.Mock() - - exc = self.assertRaises(wsgi.socket.error, server.run_server) - self.assertEqual(socket.errno.ENOMEM, exc.errno) - - mock_server.side_effect = wsgi.socket.error() - mock_server.side_effect.errno = socket.errno.EINVAL - - server.run_server() - - -class FakeController(wsgi.Controller): - - @wsgi.Controller.api_version('2.0') - def index(self, req): - return {'foo': 'bar'} - - def foo(self, req): - return {'bar': 'zoo'} - - @wsgi.Controller.api_version('2.0', '3.0') - def dance(self, req): - return {'score': 100} - - @wsgi.Controller.api_version('4.0') # noqa - def dance(self, req): # noqa F811 - return {'score': 60} - - -class MicroversionTest(base.SenlinTestCase): - - def test_versioned_request_empty(self): - data = mock.Mock() - request = wsgi.Request.blank('/tests/123') - request.version_request = vr.APIVersionRequest('1.0') - c = FakeController(data) - - ex = self.assertRaises(exception.MethodVersionNotFound, - c.index, request) - self.assertEqual("API version '1.0' is not supported on " - "this method.", str(ex)) - - res = c.foo(request) - self.assertEqual({'bar': 'zoo'}, res) - - ex = self.assertRaises(exception.MethodVersionNotFound, - c.dance, request) - self.assertEqual("API version '1.0' is not supported on " - "this method.", str(ex)) - - def test_versioned_request_lower(self): - data = mock.Mock() - request = wsgi.Request.blank('/tests/123') - request.version_request = vr.APIVersionRequest('2.0') - c = FakeController(data) - - res = c.index(request) - self.assertEqual({'foo': 'bar'}, res) - - res = c.foo(request) - self.assertEqual({'bar': 'zoo'}, res) - - res = c.dance(request) - self.assertEqual({'score': 100}, res) - - def test_versioned_request_middle(self): - data = mock.Mock() - request = wsgi.Request.blank('/tests/123') - request.version_request = vr.APIVersionRequest('2.5') - c = FakeController(data) - - res = c.index(request) - self.assertEqual({'foo': 'bar'}, res) - - res = c.foo(request) - self.assertEqual({'bar': 'zoo'}, res) - - res = c.dance(request) - self.assertEqual({'score': 100}, res) - - def test_versioned_request_upper(self): - data = mock.Mock() - request = wsgi.Request.blank('/tests/123') - request.version_request = vr.APIVersionRequest('3.0') - c = FakeController(data) - - res = c.index(request) - self.assertEqual({'foo': 'bar'}, res) - - res = c.foo(request) - self.assertEqual({'bar': 'zoo'}, res) - - res = c.dance(request) - 
self.assertEqual({'score': 100}, res) - - def test_versioned_request_too_high(self): - data = mock.Mock() - request = wsgi.Request.blank('/tests/123') - request.version_request = vr.APIVersionRequest('3.5') - c = FakeController(data) - - res = c.index(request) - self.assertEqual({'foo': 'bar'}, res) - - res = c.foo(request) - self.assertEqual({'bar': 'zoo'}, res) - - ex = self.assertRaises(exception.MethodVersionNotFound, - c.dance, request) - self.assertEqual("API version '3.5' is not supported on " - "this method.", str(ex)) - - def test_versioned_request_inner_functions(self): - data = mock.Mock() - request = wsgi.Request.blank('/tests/123') - request.version_request = vr.APIVersionRequest('3.0') - c = FakeController(data) - - res = c.dance(request) - self.assertEqual({'score': 100}, res) - - request.version_request = vr.APIVersionRequest('4.0') - res = c.dance(request) - self.assertEqual({'score': 60}, res) diff --git a/senlin/tests/unit/api/middleware/__init__.py b/senlin/tests/unit/api/middleware/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/api/middleware/policy/check_admin.json b/senlin/tests/unit/api/middleware/policy/check_admin.json deleted file mode 100644 index 96a15c83c..000000000 --- a/senlin/tests/unit/api/middleware/policy/check_admin.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "context_is_admin": "role:admin" -} diff --git a/senlin/tests/unit/api/middleware/policy/notallowed.json b/senlin/tests/unit/api/middleware/policy/notallowed.json deleted file mode 100644 index 56c481f26..000000000 --- a/senlin/tests/unit/api/middleware/policy/notallowed.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "deny_everybody": "!", - - "clusters:index": "!", - "clusters:create": "!", - "clusters:delete": "!", - "clusters:get": "!", - "clusters:action": "!", - "clusters:update": "!" -} diff --git a/senlin/tests/unit/api/middleware/test_context.py b/senlin/tests/unit/api/middleware/test_context.py deleted file mode 100644 index 2bf8e9a59..000000000 --- a/senlin/tests/unit/api/middleware/test_context.py +++ /dev/null @@ -1,126 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
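- -# Tests for the request-context middleware that builds a RequestContext -# from incoming request headers and keystone auth info.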
- -import os - -from oslo_config import cfg -from oslo_config import fixture -from oslo_middleware import request_id -from oslo_policy import opts as policy_opts -import webob - -from senlin.api.common import version_request as vr -from senlin.api.middleware import context -from senlin.common import exception -from senlin.tests.unit.common import base - -policy_path = os.path.dirname(os.path.realpath(__file__)) + "/policy/" - - -class RequestContextMiddlewareTest(base.SenlinTestCase): - - scenarios = [( - 'empty_headers', - dict( - environ=None, - headers={}, - expected_exception=None, - context_dict={ - 'auth_token': None, - 'auth_token_info': None, - 'auth_url': '', - 'is_admin': False, - 'password': None, - 'roles': [], - 'show_deleted': False, - 'project': None, - 'user': None, - 'user_name': None - }) - ), ( - 'token_creds', - dict( - environ={'keystone.token_info': {'info': 123}}, - headers={ - 'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3', - 'X-Auth-Token': 'atoken2', - 'X-Project-Name': 'my_project2', - 'X-Project-Id': 'bb9108c8-62d0-4d92-898c-d644a6af20e9', - 'X-Auth-Url': 'http://192.0.2.1:5000/v1', - 'X-Roles': 'role1,role2,role3', - }, - expected_exception=None, - context_dict={ - 'auth_token': 'atoken2', - 'auth_token_info': {'info': 123}, - 'auth_url': 'http://192.0.2.1:5000/v1', - 'is_admin': False, - 'password': None, - 'roles': ['role1', 'role2', 'role3'], - 'show_deleted': False, - 'project': 'bb9108c8-62d0-4d92-898c-d644a6af20e9', - 'user': '7a87ff18-31c6-45ce-a186-ec7987f488c3', - 'user_name': None - }) - ), ( - 'malformed_roles', - dict( - environ=None, - headers={ - 'X-Roles': [], - }, - expected_exception=exception.NotAuthenticated) - )] - - def setUp(self): - super(RequestContextMiddlewareTest, self).setUp() - self.fixture = self.useFixture(fixture.Config()) - self.fixture.conf(args=['--config-dir', policy_path]) - policy_opts.set_defaults(cfg.CONF) - cfg.CONF.set_override('policy_file', 'check_admin.json', - group='oslo_policy') - - def test_context_middleware(self): - avr = vr.APIVersionRequest('1.0') - middleware = context.ContextMiddleware(None) - request = webob.Request.blank('/clusters', headers=self.headers, - environ=self.environ) - request.version_request = avr - if self.expected_exception: - self.assertRaises( - self.expected_exception, middleware.process_request, request) - else: - self.assertIsNone(middleware.process_request(request)) - ctx = request.context.to_dict() - for k, v in self.context_dict.items(): - self.assertEqual(v, ctx[k], 'Key %s values do not match' % k) - self.assertIsNotNone(ctx.get('request_id')) - - def test_context_middleware_with_requestid(self): - avr = vr.APIVersionRequest('1.0') - middleware = context.ContextMiddleware(None) - request = webob.Request.blank('/clusters', headers=self.headers, - environ=self.environ) - req_id = 'req-5a63f0d7-1b69-447b-b621-4ea87cc7186d' - request.environ[request_id.ENV_REQUEST_ID] = req_id - request.version_request = avr - if self.expected_exception: - self.assertRaises( - self.expected_exception, middleware.process_request, request) - else: - self.assertIsNone(middleware.process_request(request)) - ctx = request.context.to_dict() - for k, v in self.context_dict.items(): - self.assertEqual(v, ctx[k], 'Key %s values do not match' % k) - self.assertEqual( - ctx.get('request_id'), req_id, - 'Key request_id values do not match') diff --git a/senlin/tests/unit/api/middleware/test_fault.py b/senlin/tests/unit/api/middleware/test_fault.py deleted file mode 100644 index 699296d8d..000000000 --- 
a/senlin/tests/unit/api/middleware/test_fault.py +++ /dev/null @@ -1,246 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import re -import webob - -from oslo_config import cfg -from oslo_log import log -from oslo_messaging._drivers import common as rpc_common - -import senlin.api.middleware.fault as fault -from senlin.common import exception as senlin_exc -from senlin.tests.unit.common import base - - -class ClusterNotFoundChild(senlin_exc.ResourceNotFound): - pass - - -class ErrorWithNewline(webob.exc.HTTPBadRequest): - pass - - -class FaultMiddlewareTest(base.SenlinTestCase): - def setUp(self): - super(FaultMiddlewareTest, self).setUp() - log.register_options(cfg.CONF) - - def test_disguised_http_exception_with_newline(self): - wrapper = fault.FaultWrapper(None) - newline_error = ErrorWithNewline('Error with \n newline') - msg = wrapper._error(senlin_exc.HTTPExceptionDisguise(newline_error)) - expected = { - 'code': 400, - 'error': { - 'code': 400, - 'message': 'Error with \n newline', - 'type': 'ErrorWithNewline' - }, - 'explanation': 'The server could not comply with the request ' - 'since it is either malformed or otherwise ' - 'incorrect.', - 'title': 'Bad Request' - } - self.assertEqual(expected, msg) - - def test_openstack_exception_with_kwargs(self): - wrapper = fault.FaultWrapper(None) - msg = wrapper._error(senlin_exc.ResourceNotFound(type='cluster', - id='a')) - - expected = { - "code": 404, - "error": { - "code": 404, - "message": "The cluster 'a' could not be found.", - "type": "ResourceNotFound" - }, - "explanation": "The resource could not be found.", - "title": "Not Found" - } - self.assertEqual(expected, msg) - - def test_openstack_exception_without_kwargs(self): - wrapper = fault.FaultWrapper(None) - msg = wrapper._error(senlin_exc.PolicyNotSpecified()) - expected = { - 'code': 500, - 'error': { - 'code': 500, - 'message': 'Policy not specified.', - 'type': 'PolicyNotSpecified' - }, - 'explanation': 'The server has either erred or is incapable of ' - 'performing the requested operation.', - 'title': 'Internal Server Error' - } - self.assertEqual(expected, msg) - - def test_exception_with_non_ascii_chars(self): - # We set debug to true to test the code path for serializing traces too - cfg.CONF.set_override('debug', True) - msg = u'Error with non-ascii chars \x80' - - class TestException(senlin_exc.SenlinException): - msg_fmt = msg - - wrapper = fault.FaultWrapper(None) - msg = wrapper._error(TestException()) - - self.assertEqual(500, msg['code']) - self.assertEqual(500, msg['error']['code']) - self.assertEqual(u'Error with non-ascii chars \x80', - msg['error']['message']) - self.assertEqual('TestException', msg['error']['type']) - self.assertEqual('The server has either erred or is incapable of ' - 'performing the requested operation.', - msg['explanation']) - self.assertEqual('Internal Server Error', msg['title']) - - def test_remote_exception(self): - cfg.CONF.set_override('debug', True) - error = senlin_exc.ResourceNotFound(type='cluster', id='a') 
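- # Round-trip the error through oslo.messaging's remote-exception - # serialization to mimic a fault raised by the engine service.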
- exc_info = (type(error), error, None) - serialized = rpc_common.serialize_remote_exception(exc_info) - remote_error = rpc_common.deserialize_remote_exception( - serialized, ["senlin.common.exception"]) - wrapper = fault.FaultWrapper(None) - msg = wrapper._error(remote_error) - expected_message = str(remote_error).split('\n', 1)[0] - expected = { - 'code': 404, - 'error': { - 'code': 404, - 'message': expected_message, - 'type': 'ResourceNotFound' - }, - 'explanation': 'The resource could not be found.', - 'title': 'Not Found' - } - self.assertEqual(expected, msg) - - def remote_exception_helper(self, name, error): - exc_info = (type(error), error, None) - - serialized = rpc_common.serialize_remote_exception(exc_info) - remote_error = rpc_common.deserialize_remote_exception( - serialized, name) - wrapper = fault.FaultWrapper(None) - msg = wrapper._error(remote_error) - expected = { - 'code': 500, - 'error': { - 'code': 500, - 'message': msg['error']['message'], - 'type': 'RemoteError' - }, - 'explanation': msg['explanation'], - 'title': 'Internal Server Error' - } - self.assertEqual(expected, msg) - - def test_all_remote_exceptions(self): - for name, obj in inspect.getmembers( - senlin_exc, lambda x: inspect.isclass(x) and issubclass( - x, senlin_exc.SenlinException)): - - if '__init__' in obj.__dict__: - if obj == senlin_exc.SenlinException: - continue - elif obj == senlin_exc.Error: - error = obj('Error') - elif obj == senlin_exc.ResourceNotFound: - error = obj() - else: - continue - self.remote_exception_helper(name, error) - continue - - if hasattr(obj, 'msg_fmt'): - kwargs = {} - spec_names = re.findall(r'%\((\w+)\)([cdeEfFgGinorsxX])', - obj.msg_fmt) - - for key, convtype in spec_names: - if convtype == 'r' or convtype == 's': - kwargs[key] = '"' + key + '"' - else: - # this is highly unlikely - raise Exception("test needs additional conversion" - " type added due to %s exception" - " using '%c' specifier" % ( - obj, convtype)) - - error = obj(**kwargs) - self.remote_exception_helper(name, error) - - def test_should_not_ignore_parent_classes(self): - wrapper = fault.FaultWrapper(None) - - msg = wrapper._error(ClusterNotFoundChild(type='cluster', id='a')) - expected = { - "code": 404, - "error": { - "code": 404, - "message": "The cluster 'a' could not be found.", - "type": "ClusterNotFoundChild" - }, - "explanation": "The resource could not be found.", - "title": "Not Found" - } - self.assertEqual(expected, msg) - - def test_internal_server_error_when_exception_and_parents_not_mapped(self): - wrapper = fault.FaultWrapper(None) - - class NotMappedException(Exception): - pass - - msg = wrapper._error(NotMappedException('A message')) - expected = { - "code": 500, - "error": { - "code": 500, - "message": "A message", - "type": "NotMappedException" - }, - "explanation": ("The server has either erred or is incapable " - "of performing the requested operation."), - "title": "Internal Server Error" - } - self.assertEqual(expected, msg) - - def test_should_not_ignore_parent_classes_even_for_remote_ones(self): - cfg.CONF.set_override('debug', True) - - error = ClusterNotFoundChild(type='cluster', id='a') - exc_info = (type(error), error, None) - serialized = rpc_common.serialize_remote_exception(exc_info) - remote_error = rpc_common.deserialize_remote_exception( - serialized, ["senlin.tests.unit.api.middleware.test_fault"]) - - wrapper = fault.FaultWrapper(None) - msg = wrapper._error(remote_error) - expected_message = str(remote_error).split('\n', 1)[0] - expected = { - 'code': 404, - 'error': 
{ - 'code': 404, - 'message': expected_message, - 'type': 'ClusterNotFoundChild' - }, - 'explanation': 'The resource could not be found.', - 'title': 'Not Found' - } - self.assertEqual(expected, msg) diff --git a/senlin/tests/unit/api/middleware/test_middleware_filters.py b/senlin/tests/unit/api/middleware/test_middleware_filters.py deleted file mode 100644 index 9b9ce67c4..000000000 --- a/senlin/tests/unit/api/middleware/test_middleware_filters.py +++ /dev/null @@ -1,81 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.api import middleware as mw -from senlin.api.middleware import context -from senlin.api.middleware import fault -from senlin.api.middleware import trust -from senlin.api.middleware import version_negotiation as vn -from senlin.api.middleware import webhook -from senlin.tests.unit.common import base - - -class MiddlewareFilterTest(base.SenlinTestCase): - - def setUp(self): - super(MiddlewareFilterTest, self).setUp() - - self.app = mock.Mock() - self.conf = mock.Mock() - self.local_conf = dict(key='value') - - @mock.patch.object(vn, 'VersionNegotiationFilter') - def test_version_negotiation_filter(self, mock_vnf): - exp = mock.Mock() - mock_vnf.return_value = exp - - actual = mw.version_filter(self.app, self.conf, **self.local_conf) - - self.assertEqual(exp, actual) - mock_vnf.assert_called_once_with(self.app, self.conf) - - @mock.patch.object(fault, 'FaultWrapper') - def test_faultwrap_filter(self, mock_fw): - exp = mock.Mock() - mock_fw.return_value = exp - - actual = mw.fault_filter(self.app, self.conf, **self.local_conf) - - self.assertEqual(exp, actual) - mock_fw.assert_called_once_with(self.app) - - @mock.patch.object(context, 'ContextMiddleware') - def test_contextmiddleware_filter(self, mock_ctx): - exp = mock.Mock() - mock_ctx.return_value = exp - - actual = mw.context_filter(self.app, self.conf, **self.local_conf) - - self.assertEqual(exp, actual) - mock_ctx.assert_called_once_with(self.app) - - @mock.patch.object(trust, 'TrustMiddleware') - def test_trustmiddleware_filter(self, mock_trust): - exp = mock.Mock() - mock_trust.return_value = exp - - actual = mw.trust_filter(self.app, self.conf, **self.local_conf) - - self.assertEqual(exp, actual) - mock_trust.assert_called_once_with(self.app) - - @mock.patch.object(webhook, 'WebhookMiddleware') - def test_webhookmiddleware_filter(self, mock_wh): - exp = mock.Mock() - mock_wh.return_value = exp - - actual = mw.webhook_filter(self.app, self.conf, **self.local_conf) - - self.assertEqual(exp, actual) - mock_wh.assert_called_once_with(self.app) diff --git a/senlin/tests/unit/api/middleware/test_trust.py b/senlin/tests/unit/api/middleware/test_trust.py deleted file mode 100644 index 82901c5ec..000000000 --- a/senlin/tests/unit/api/middleware/test_trust.py +++ /dev/null @@ -1,212 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.api.middleware import trust -from senlin.common import context -from senlin.common import exception -from senlin.objects.requests import credentials as vorc -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestTrustMiddleware(base.SenlinTestCase): - - def setUp(self): - super(TestTrustMiddleware, self).setUp() - self.context = utils.dummy_context() - self.req = mock.Mock() - self.req.context = self.context - self.middleware = trust.TrustMiddleware(None) - - @mock.patch("senlin.rpc.client.get_engine_client") - def test_get_trust_already_exists(self, mock_rpc): - x_cred = {'trust': 'FAKE_TRUST_ID'} - x_rpc = mock.Mock() - x_rpc.call.return_value = x_cred - mock_rpc.return_value = x_rpc - - result = self.middleware._get_trust(self.req) - - self.assertEqual('FAKE_TRUST_ID', result) - mock_rpc.assert_called_once_with() - x_rpc.call.assert_called_once_with(self.context, 'credential_get', - mock.ANY) - request = x_rpc.call.call_args[0][2] - self.assertIsInstance(request, vorc.CredentialGetRequest) - self.assertEqual(self.context.user_id, request.user) - self.assertEqual(self.context.project_id, request.project) - - @mock.patch.object(context, "get_service_credentials") - @mock.patch("senlin.drivers.base.SenlinDriver") - @mock.patch("senlin.rpc.client.get_engine_client") - def test_get_trust_bad(self, mock_rpc, mock_driver, mock_creds): - x_cred = {'foo': 'bar'} - x_rpc = mock.Mock() - x_rpc.call.return_value = x_cred - mock_rpc.return_value = x_rpc - - x_svc_cred = {'uid': 'FAKE_ID', 'passwd': 'FAKE_PASS'} - mock_creds.return_value = x_svc_cred - x_admin_id = 'FAKE_ADMIN_ID' - x_trust = mock.Mock(id='FAKE_TRUST_ID') - mock_keystone = mock.Mock() - mock_keystone.get_user_id.return_value = x_admin_id - mock_keystone.trust_get_by_trustor.return_value = x_trust - x_driver = mock.Mock() - x_driver.identity.return_value = mock_keystone - mock_driver.return_value = x_driver - - result = self.middleware._get_trust(self.req) - self.assertEqual('FAKE_TRUST_ID', result) - mock_calls = [mock.call(self.context, 'credential_get', mock.ANY), - mock.call(self.context, 'credential_create', mock.ANY)] - x_rpc.call.assert_has_calls(mock_calls) - request = x_rpc.call.call_args_list[0][0][2] - self.assertIsInstance(request, vorc.CredentialGetRequest) - self.assertEqual(self.context.user_id, request.user) - self.assertEqual(self.context.project_id, request.project) - request = x_rpc.call.call_args_list[1][0][2] - self.assertIsInstance(request, vorc.CredentialCreateRequest) - expected_cred = { - 'openstack': {'trust': 'FAKE_TRUST_ID'} - } - self.assertEqual(expected_cred, request.cred) - mock_driver.assert_called_once_with() - x_driver.identity.assert_called_once_with({ - 'auth_url': self.context.auth_url, - 'user_id': self.context.user_id, - 'token': self.context.auth_token, - }) - mock_creds.assert_called_once_with() - mock_keystone.get_user_id.assert_called_once_with( - uid='FAKE_ID', passwd='FAKE_PASS') - mock_keystone.trust_get_by_trustor.assert_called_once_with( - self.context.user_id, 'FAKE_ADMIN_ID', 
self.context.project_id) - - @mock.patch.object(context, "get_service_credentials") - @mock.patch("senlin.drivers.base.SenlinDriver") - @mock.patch("senlin.rpc.client.get_engine_client") - def test_get_trust_not_found(self, mock_rpc, mock_driver, mock_creds): - x_rpc = mock.Mock() - x_rpc.call.return_value = None - mock_rpc.return_value = x_rpc - - x_svc_cred = {'uid': 'FAKE_ID', 'passwd': 'FAKE_PASS'} - mock_creds.return_value = x_svc_cred - x_admin_id = 'FAKE_ADMIN_ID' - x_trust = mock.Mock(id='FAKE_TRUST_ID') - mock_keystone = mock.Mock() - mock_keystone.get_user_id.return_value = x_admin_id - mock_keystone.trust_get_by_trustor.return_value = x_trust - x_driver = mock.Mock() - x_driver.identity.return_value = mock_keystone - mock_driver.return_value = x_driver - - result = self.middleware._get_trust(self.req) - self.assertEqual('FAKE_TRUST_ID', result) - mock_calls = [mock.call(self.context, 'credential_get', mock.ANY), - mock.call(self.context, 'credential_create', mock.ANY)] - x_rpc.call.assert_has_calls(mock_calls) - mock_rpc.assert_called_once_with() - mock_driver.assert_called_once_with() - x_driver.identity.assert_called_once_with({ - 'auth_url': self.context.auth_url, - 'user_id': self.context.user_id, - 'token': self.context.auth_token, - }) - mock_creds.assert_called_once_with() - mock_keystone.get_user_id.assert_called_once_with( - uid='FAKE_ID', passwd='FAKE_PASS') - mock_keystone.trust_get_by_trustor.assert_called_once_with( - self.context.user_id, 'FAKE_ADMIN_ID', self.context.project_id) - - @mock.patch.object(context, "get_service_credentials") - @mock.patch("senlin.drivers.base.SenlinDriver") - @mock.patch("senlin.rpc.client.get_engine_client") - def test_get_trust_do_create(self, mock_rpc, mock_driver, mock_creds): - x_rpc = mock.Mock() - x_rpc.call.return_value = None - mock_rpc.return_value = x_rpc - - x_svc_cred = {'uid': 'FAKE_ID', 'passwd': 'FAKE_PASS'} - mock_creds.return_value = x_svc_cred - x_admin_id = 'FAKE_ADMIN_ID' - mock_keystone = mock.Mock() - mock_keystone.get_user_id.return_value = x_admin_id - x_trust = mock.Mock(id='FAKE_TRUST_ID') - mock_keystone.trust_create.return_value = x_trust - err = exception.InternalError(code=400, message='Boom') - mock_keystone.trust_get_by_trustor.side_effect = err - x_driver = mock.Mock() - x_driver.identity.return_value = mock_keystone - mock_driver.return_value = x_driver - - result = self.middleware._get_trust(self.req) - self.assertEqual('FAKE_TRUST_ID', result) - mock_calls = [mock.call(self.context, 'credential_get', mock.ANY), - mock.call(self.context, 'credential_create', mock.ANY)] - x_rpc.call.assert_has_calls(mock_calls) - mock_driver.assert_called_once_with() - x_driver.identity.assert_called_once_with({ - 'auth_url': self.context.auth_url, - 'user_id': self.context.user_id, - 'token': self.context.auth_token, - }) - mock_creds.assert_called_once_with() - mock_keystone.get_user_id.assert_called_once_with( - uid='FAKE_ID', passwd='FAKE_PASS') - mock_keystone.trust_get_by_trustor.assert_called_once_with( - self.context.user_id, 'FAKE_ADMIN_ID', self.context.project_id) - mock_keystone.trust_create.assert_called_once_with( - self.context.user_id, 'FAKE_ADMIN_ID', self.context.project_id, - self.context.roles) - - @mock.patch.object(context, "get_service_credentials") - @mock.patch("senlin.drivers.base.SenlinDriver") - @mock.patch("senlin.rpc.client.get_engine_client") - def test_get_trust_fatal(self, mock_rpc, mock_driver, mock_creds): - x_rpc = mock.Mock() - x_rpc.call.return_value = None - mock_rpc.return_value 
= x_rpc - - x_svc_cred = {'uid': 'FAKE_ID', 'passwd': 'FAKE_PASS'} - mock_creds.return_value = x_svc_cred - x_admin_id = 'FAKE_ADMIN_ID' - mock_keystone = mock.Mock() - mock_keystone.get_user_id.return_value = x_admin_id - err = exception.InternalError(code=500, message='Boom') - mock_keystone.trust_get_by_trustor.side_effect = err - x_driver = mock.Mock() - x_driver.identity.return_value = mock_keystone - mock_driver.return_value = x_driver - - ex = self.assertRaises(exception.InternalError, - self.middleware._get_trust, - self.req) - - self.assertEqual('Boom', str(ex)) - mock_rpc.assert_called_once_with() - x_rpc.call.assert_called_once_with(self.context, 'credential_get', - mock.ANY) - mock_driver.assert_called_once_with() - x_driver.identity.assert_called_once_with({ - 'auth_url': self.context.auth_url, - 'user_id': self.context.user_id, - 'token': self.context.auth_token, - }) - mock_creds.assert_called_once_with() - mock_keystone.get_user_id.assert_called_once_with( - uid='FAKE_ID', passwd='FAKE_PASS') - mock_keystone.trust_get_by_trustor.assert_called_once_with( - self.context.user_id, 'FAKE_ADMIN_ID', self.context.project_id) diff --git a/senlin/tests/unit/api/middleware/test_version_negotiation.py b/senlin/tests/unit/api/middleware/test_version_negotiation.py deleted file mode 100644 index a7f33a781..000000000 --- a/senlin/tests/unit/api/middleware/test_version_negotiation.py +++ /dev/null @@ -1,284 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
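- -# Tests for the version negotiation middleware that resolves the API -# version from the URL path and the Accept/API-version headers.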
- -from unittest import mock - -import webob - -from senlin.api.common import version_request as vr -from senlin.api.common import wsgi -from senlin.api.middleware import version_negotiation as vn -from senlin.common import exception -from senlin.tests.unit.common import base - - -@mock.patch("senlin.api.openstack.versions.Controller") -class VersionNegotiationTest(base.SenlinTestCase): - - def test_get_version_controller(self, mock_vc): - gvc = mock_vc.return_value - xvc = mock.Mock() - gvc.get_controller = mock.Mock(return_value=xvc) - vnf = vn.VersionNegotiationFilter(None, None) - request = webob.Request({}) - - res = vnf._get_controller('v1.0', request) - - self.assertEqual(xvc, res) - self.assertEqual(1, request.environ['api.major']) - self.assertEqual(0, request.environ['api.minor']) - gvc.get_controller.assert_called_once_with('1.0') - - def test_get_version_controller_shorter_version(self, mock_vc): - gvc = mock_vc.return_value - xvc = mock.Mock() - gvc.get_controller = mock.Mock(return_value=xvc) - vnf = vn.VersionNegotiationFilter(None, None) - request = webob.Request({}) - - res = vnf._get_controller('v1', request) - - self.assertEqual(xvc, res) - self.assertEqual(1, request.environ['api.major']) - self.assertEqual(0, request.environ['api.minor']) - gvc.get_controller.assert_called_once_with('1.0') - - def test_get_controller_not_match_version(self, mock_vc): - gvc = mock_vc.return_value - gvc.get_controller = mock.Mock(return_value=None) - vnf = vn.VersionNegotiationFilter(None, None) - request = webob.Request({}) - - res = vnf._get_controller("invalid", request) - - self.assertIsNone(res) - self.assertEqual(0, gvc.get_controller.call_count) - - def test_request_path_is_version(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - request = webob.Request({'PATH_INFO': 'versions'}) - - response = vnf.process_request(request) - - self.assertIs(mock_vc.return_value, response) - - def test_request_path_is_empty(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - request = webob.Request({'PATH_INFO': '/'}) - - response = vnf.process_request(request) - - self.assertIs(mock_vc.return_value, response) - - def test_request_path_contains_valid_version(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - gvc = mock_vc.return_value - x_controller = mock.Mock() - gvc.get_controller = mock.Mock(return_value=x_controller) - mock_check = self.patchobject(vnf, '_check_version_request') - major = 1 - minor = 0 - request = webob.Request({'PATH_INFO': 'v1.0/resource'}) - - response = vnf.process_request(request) - - self.assertIsNone(response) - self.assertEqual(major, request.environ['api.major']) - self.assertEqual(minor, request.environ['api.minor']) - gvc.get_controller.assert_called_once_with('1.0') - mock_check.assert_called_once_with(request, x_controller) - - def test_removes_version_from_request_path(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - self.patchobject(vnf, '_check_version_request') - expected_path = 'resource' - request = webob.Request({'PATH_INFO': 'v1.0/%s' % expected_path}) - - response = vnf.process_request(request) - - self.assertIsNone(response) - self.assertEqual(expected_path, request.path_info_peek()) - - def test_simple_version_on_request_path(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - self.patchobject(vnf, '_check_version_request') - fake_vc = mock.Mock(return_value={'foo': 'bar'}) - self.patchobject(vnf.versions_app, 'get_controller', - return_value=fake_vc) - request = 
webob.Request({'PATH_INFO': 'v1'}) - - response = vnf.process_request(request) - - self.assertEqual({'foo': 'bar'}, response) - - def test_full_version_on_request_path(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - self.patchobject(vnf, '_check_version_request') - fake_vc = mock.Mock(return_value={'foo': 'bar'}) - self.patchobject(vnf.versions_app, 'get_controller', - return_value=fake_vc) - request = webob.Request({'PATH_INFO': 'v1.0'}) - - response = vnf.process_request(request) - - self.assertEqual({'foo': 'bar'}, response) - - def test_request_path_contains_unknown_version(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - gvc = mock_vc.return_value - gvc.get_controller = mock.Mock(return_value=None) - self.patchobject(vnf, '_check_version_request') - - request = webob.Request({'PATH_INFO': 'v2.0/resource'}) - request.headers['Accept'] = '*/*' - - response = vnf.process_request(request) - - self.assertIs(mock_vc.return_value, response) - - def test_accept_header_contains_valid_version(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - self.patchobject(vnf, '_check_version_request') - major = 1 - minor = 0 - request = webob.Request({'PATH_INFO': 'resource'}) - request.headers['Accept'] = 'application/vnd.openstack.clustering-v1.0' - - response = vnf.process_request(request) - - self.assertIsNone(response) - self.assertEqual(major, request.environ['api.major']) - self.assertEqual(minor, request.environ['api.minor']) - - def test_accept_header_contains_simple_version(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - self.patchobject(vnf, '_check_version_request') - fake_vc = mock.Mock(return_value={'foo': 'bar'}) - self.patchobject(vnf.versions_app, 'get_controller', - return_value=fake_vc) - major = 1 - minor = 0 - request = webob.Request({'PATH_INFO': ''}) - request.headers['Accept'] = 'application/vnd.openstack.clustering-v1.0' - - response = vnf.process_request(request) - - self.assertEqual(major, request.environ['api.major']) - self.assertEqual(minor, request.environ['api.minor']) - self.assertEqual({'foo': 'bar'}, response) - - def test_accept_header_contains_unknown_version(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - self.patchobject(vnf, '_check_version_request') - request = webob.Request({'PATH_INFO': 'resource'}) - request.headers['Accept'] = 'application/vnd.openstack.clustering-v2.0' - - response = vnf.process_request(request) - - self.assertIsNone(response) - - request.headers['Accept'] = 'application/vnd.openstack.clustering-vab' - response = vnf.process_request(request) - - self.assertIsInstance(response, webob.exc.HTTPNotFound) - - def test_no_URI_version_accept_with_invalid_MIME_type(self, mock_vc): - vnf = vn.VersionNegotiationFilter(None, None) - gvc = mock_vc.return_value - gvc.get_controller = mock.Mock(side_effect=[None, None]) - self.patchobject(vnf, '_check_version_request') - - request = webob.Request({'PATH_INFO': 'resource'}) - request.headers['Accept'] = 'application/invalidMIMEType' - - response = vnf.process_request(request) - - self.assertIsInstance(response, webob.exc.HTTPNotFound) - - request.headers['Accept'] = '' - response = vnf.process_request(request) - self.assertEqual(gvc, response) - - def test_check_version_request(self, mock_vc): - controller = mock.Mock() - minv = vr.APIVersionRequest('1.0') - maxv = vr.APIVersionRequest('1.3') - controller.min_api_version = mock.Mock(return_value=minv) - controller.max_api_version = mock.Mock(return_value=maxv) - - request = 
webob.Request({'PATH_INFO': 'resource'}) - request.headers[wsgi.API_VERSION_KEY] = 'clustering 1.0,compute 2.0' - vnf = vn.VersionNegotiationFilter(None, None) - - vnf._check_version_request(request, controller) - self.assertIsNotNone(request.version_request) - expected = vr.APIVersionRequest('1.0') - self.assertEqual(expected, request.version_request) - - def test_check_version_request_default(self, mock_vc): - controller = mock.Mock() - controller.DEFAULT_API_VERSION = "1.0" - request = webob.Request({'PATH_INFO': 'resource'}) - request.headers[wsgi.API_VERSION_KEY] = 'compute 2.0' - vnf = vn.VersionNegotiationFilter(None, None) - - vnf._check_version_request(request, controller) - - self.assertIsNotNone(request.version_request) - expected = vr.APIVersionRequest(controller.DEFAULT_API_VERSION) - self.assertEqual(expected, request.version_request) - - def test_check_version_request_invalid_format(self, mock_vc): - controller = mock.Mock() - request = webob.Request({'PATH_INFO': 'resource'}) - request.headers[wsgi.API_VERSION_KEY] = 'clustering 2.03' - vnf = vn.VersionNegotiationFilter(None, None) - - ex = self.assertRaises(webob.exc.HTTPBadRequest, - vnf._check_version_request, - request, controller) - self.assertEqual("API Version String '2.03' is of invalid format. It " - "must be of format 'major.minor'.", - str(ex)) - - def test_check_version_request_invalid_version(self, mock_vc): - controller = mock.Mock() - minv = vr.APIVersionRequest('1.0') - maxv = vr.APIVersionRequest('1.100') - controller.min_api_version = mock.Mock(return_value=minv) - controller.max_api_version = mock.Mock(return_value=maxv) - - request = webob.Request({'PATH_INFO': 'resource'}) - request.headers[wsgi.API_VERSION_KEY] = 'clustering 2.3' - vnf = vn.VersionNegotiationFilter(None, None) - - ex = self.assertRaises(exception.InvalidGlobalAPIVersion, - vnf._check_version_request, - request, controller) - expected = ("Version '2.3' is not supported by the API. Minimum is " - "'%(min_ver)s' and maximum is '%(max_ver)s'." % - {'min_ver': str(minv), 'max_ver': str(maxv)}) - self.assertEqual(expected, str(ex)) - - def test_check_version_request_latest(self, mock_vc): - controller = mock.Mock() - controller.max_api_version = mock.Mock(return_value='12.34') - - request = webob.Request({'PATH_INFO': 'resource'}) - request.headers[wsgi.API_VERSION_KEY] = 'clustering Latest' - vnf = vn.VersionNegotiationFilter(None, None) - - vnf._check_version_request(request, controller) - - self.assertIsNotNone(request.version_request) - expected = '12.34' - self.assertEqual(expected, request.version_request) diff --git a/senlin/tests/unit/api/middleware/test_webhook.py b/senlin/tests/unit/api/middleware/test_webhook.py deleted file mode 100644 index 30915f861..000000000 --- a/senlin/tests/unit/api/middleware/test_webhook.py +++ /dev/null @@ -1,268 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
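- -# Tests for the webhook middleware that validates trigger URLs and -# injects a token obtained with the receiver's credentials.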
- -from unittest import mock - -from oslo_config import cfg -from oslo_utils import uuidutils -import webob - -from senlin.api.common import util as common_util -from senlin.api.common import version_request as vr -from senlin.api.middleware import webhook as webhook_middleware -from senlin.common import context -from senlin.common import exception -from senlin.drivers import base as driver_base -from senlin.rpc import client as rpc -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestWebhookMiddleware(base.SenlinTestCase): - - def setUp(self): - super(TestWebhookMiddleware, self).setUp() - self.ctx = utils.dummy_context() - self.middleware = webhook_middleware.WebhookMiddleware(None) - - self.url_slices = { - '00_url_base': 'http://HOST_IP:PORT/v1', - '01_webhook_str': '/webhooks/', - '02_webhook_id': 'WEBHOOK_ID', - '03_trigger_str': '/trigger?', - '04_version': 'V=2', - '05_params': '&key=TEST_KEY', - } - self.credential = { - 'auth_url': 'TEST_URL', - 'user_id': '123', - 'password': 'abc' - } - - def _generate_url(self): - slices = sorted(self.url_slices.items(), key=lambda d: d[0]) - return ''.join(s[1] for s in slices) - - def test_parse_url(self): - # The webhook ID and parameters should be extracted correctly - res = self.middleware._parse_url(self._generate_url()) - self.assertEqual(('WEBHOOK_ID', {'key': 'TEST_KEY'}), res) - - def test_parse_url_version_provided_no_key(self): - # A URL of the form <url_base>/webhooks/<webhook_id>/trigger?V=2 - # should be valid - self.url_slices.pop('05_params') - - res = self.middleware._parse_url(self._generate_url()) - self.assertEqual(('WEBHOOK_ID', {}), res) - - def test_parse_url_no_version_provided_no_key_provided(self): - # A URL of the form <url_base>/webhooks/<webhook_id>/trigger - # should be invalid because the version is missing - self.url_slices.pop('04_version') - self.url_slices.pop('05_params') - - ex = self.assertRaises(webob.exc.HTTPBadRequest, - self.middleware._parse_url, - self._generate_url()) - - self.assertEqual("V query parameter is required in webhook trigger " - "URL", str(ex)) - - def test_parse_url_no_version_provided_key_provided(self): - # A URL of the form <url_base>/webhooks/<webhook_id>/trigger?key=value - # should be invalid because the version is missing - self.url_slices.pop('04_version') - self.url_slices.pop('05_params') - self.url_slices['05_params'] = 'key=TEST_KEY' - - ex = self.assertRaises(webob.exc.HTTPBadRequest, - self.middleware._parse_url, - self._generate_url()) - - self.assertEqual("V query parameter is required in webhook trigger " - "URL", str(ex)) - - def test_parse_url_webhooks_not_found(self): - # The string 'webhooks' is not found in the URL - self.url_slices['01_webhook_str'] = '/foo/' - res = self.middleware._parse_url(self._generate_url()) - self.assertIsNone(res) - - def test_parse_url_trigger_not_found(self): - # The string 'trigger' is not found in the URL - self.url_slices['03_trigger_str'] = '/foo?' - res = self.middleware._parse_url(self._generate_url()) - self.assertIsNone(res) - - def test_parse_url_illegal_format(self): - # A URL in which 'trigger' is not preceded by a '/' does not match - self.url_slices['03_trigger_str'] = 'trigger?' - res = self.middleware._parse_url(self._generate_url()) - self.assertIsNone(res) - - def test_parse_url_no_trigger_word(self): - # The last segment of the URL does not start with 'trigger' - self.url_slices['03_trigger_str'] = '/foo-trigger?' 
- res = self.middleware._parse_url(self._generate_url()) - self.assertIsNone(res) - - @mock.patch.object(driver_base, 'SenlinDriver') - def test_get_token_succeeded(self, mock_senlindriver): - class FakeAccessInfo(object): - def __init__(self, auth_token): - self.auth_token = auth_token - - sd = mock.Mock() - sd.identity.get_token.return_value = 'TEST_TOKEN' - mock_senlindriver.return_value = sd - - token = self.middleware._get_token(**self.credential) - self.assertEqual('TEST_TOKEN', token) - - @mock.patch.object(driver_base, 'SenlinDriver') - def test_get_token_failed(self, mock_senlindriver): - self.credential['webhook_id'] = 'WEBHOOK_ID' - - sd = mock.Mock() - sd.identity.get_token.side_effect = Exception() - mock_senlindriver.return_value = sd - - self.assertRaises(exception.Forbidden, self.middleware._get_token, - **self.credential) - - @mock.patch.object(common_util, 'parse_request') - @mock.patch.object(context, 'RequestContext') - @mock.patch.object(rpc, 'get_engine_client') - def test_process_request(self, mock_client, mock_ctx, mock_parse): - cfg.CONF.set_override('auth_url', 'AUTH_URL', group='authentication') - cfg.CONF.set_override('service_username', 'USERNAME', - group='authentication') - cfg.CONF.set_override('service_user_domain', 'DOMAIN', - group='authentication') - cfg.CONF.set_override('service_project_domain', 'DOMAIN1', - group='authentication') - cfg.CONF.set_override('service_password', 'PASSWORD', - group='authentication') - cfg.CONF.set_override('verify_ssl', False, - group='authentication') - cfg.CONF.set_override('interface', 'admin', - group='authentication') - - req = mock.Mock() - req.method = 'POST' - req.url = 'http://url1/v1' - req.script_name = '/v1' - req.params = {'key': 'FAKE_KEY'} - req.headers = {} - req.version_request = vr.APIVersionRequest('1.0') - - rpcc = mock.Mock() - fake_receiver = { - 'id': 'FAKE_ID', - 'actor': {'foo': 'bar'} - } - rpcc.call.return_value = fake_receiver - mock_client.return_value = rpcc - dbctx = mock.Mock() - mock_ctx.return_value = dbctx - - obj = mock.Mock() - mock_parse.return_value = obj - - fake_return = ('WEBHOOK', {}) - mock_extract = self.patchobject(self.middleware, '_parse_url', - return_value=fake_return) - mock_token = self.patchobject(self.middleware, '_get_token', - return_value='FAKE_TOKEN') - - res = self.middleware.process_request(req) - self.assertIsNone(res) - - self.assertEqual('FAKE_TOKEN', req.headers['X-Auth-Token']) - mock_extract.assert_called_once_with('http://url1/v1') - mock_token.assert_called_once_with( - auth_url='AUTH_URL', password='PASSWORD', username='USERNAME', - user_domain_name='DOMAIN', foo='bar', verify=False, - project_domain_name='DOMAIN1', interface='admin') - - mock_parse.assert_called_once_with('ReceiverGetRequest', req, - {'identity': 'WEBHOOK'}) - rpcc.call.assert_called_with(dbctx, 'receiver_get', obj) - - def test_process_request_method_not_post(self): - # Request method is not POST - req = mock.Mock() - req.method = 'GET' - res = self.middleware.process_request(req) - self.assertIsNone(res) - self.assertNotIn('X-Auth-Token', req.headers) - - def test_process_request_bad_format(self): - # no webhook_id extracted - req = mock.Mock() - req.method = 'POST' - req.url = 'http://url1/v1' - req.script_name = '/v1' - mock_extract = self.patchobject(self.middleware, '_parse_url', - return_value=None) - - res = self.middleware.process_request(req) - self.assertIsNone(res) - mock_extract.assert_called_once_with(req.url) - self.assertNotIn('X-Auth-Token', req.headers) - - def 
test_parse_url_valid(self): - uid = uuidutils.generate_uuid() - - result = self.middleware._parse_url( - 'https://url1/cluster/v1/webhooks/%s/trigger?V=2&k=v' % uid - ) - - self.assertEqual( - (uid, {'k': 'v'}), result - ) - - def test_parse_url_valid_with_port(self): - uid = uuidutils.generate_uuid() - - result = self.middleware._parse_url( - 'http://url1:5000/v1/webhooks/%s/trigger?V=2&k=v' % uid - ) - - self.assertEqual( - (uid, {'k': 'v'}), result - ) - - def test_parse_url_invalid(self): - result = self.middleware._parse_url( - 'http://url1' - ) - - self.assertIsNone(result) - - def test_parse_url_missing_version(self): - uid = uuidutils.generate_uuid() - - result = self.middleware._parse_url( - 'https://url1/cluster/webhooks/%s/trigger?V=2&k=v' % uid - ) - - self.assertIsNone(result) - - def test_parse_url_missing_webhooks(self): - uid = uuidutils.generate_uuid() - - result = self.middleware._parse_url( - 'https://url1/cluster/v1/%s/trigger?V=2&k=v' % uid - ) - - self.assertIsNone(result) diff --git a/senlin/tests/unit/api/openstack/__init__.py b/senlin/tests/unit/api/openstack/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/api/openstack/test_versions.py b/senlin/tests/unit/api/openstack/test_versions.py deleted file mode 100644 index 980bba036..000000000 --- a/senlin/tests/unit/api/openstack/test_versions.py +++ /dev/null @@ -1,60 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
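The test_parse_url_* cases above collectively pin down the contract of WebhookMiddleware._parse_url without showing the implementation itself (it lived in senlin/api/middleware/webhook.py). A minimal sketch consistent with those assertions is given below for reviewers; parse_webhook_url is an invented name and this is a reconstruction from the tests, not the removed middleware.

from urllib.parse import parse_qsl, urlparse

import webob.exc


def parse_webhook_url(url):
    """Return (webhook_id, params) for a trigger URL, None otherwise."""
    parsed = urlparse(url)
    parts = parsed.path.split('/')
    # Expect a trailing /<api-version>/webhooks/<id>/trigger path.
    if (len(parts) < 5 or parts[-3] != 'webhooks' or parts[-1] != 'trigger'
            or not parts[-4].lstrip('v').isdigit()):
        return None
    params = dict(parse_qsl(parsed.query))
    # The tests require an explicit V query parameter; its value is dropped
    # from the parameters handed back to the caller.
    if 'V' not in params:
        raise webob.exc.HTTPBadRequest(
            'V query parameter is required in webhook trigger URL')
    params.pop('V')
    return parts[-2], params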
- -import http.client as http_client -from unittest import mock - -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import webob - -from senlin.api.common import wsgi -from senlin.api.openstack.v1 import version as v1_controller -from senlin.api.openstack import versions -from senlin.tests.unit.common import base - - -class VersionControllerTest(base.SenlinTestCase): - - def test_init(self): - conf = mock.Mock() - - controller = versions.Controller(conf) - - self.assertIsNotNone(controller) - self.assertEqual(conf, controller.conf) - - @mock.patch.object(v1_controller.VersionController, 'version_info') - def test_call(self, mock_v1): - mock_v1.return_value = {'foo': 'bar'} - conf = mock.Mock() - controller = versions.Controller(conf) - environ = { - 'REQUEST_METHOD': 'GET', - 'SERVER_NAME': 'host', - 'SERVER_PORT': 8777, - 'SCRIPT_NAME': '/', - 'PATH_INFO': '/', - 'wsgi.url_scheme': 'http', - } - req = wsgi.Request(environ) - expected_dict = { - 'versions': [{'foo': 'bar'}] - } - expected_body = jsonutils.dumps(expected_dict) - - resp = controller(req) - - self.assertIsInstance(resp, webob.Response) - self.assertEqual(expected_body, encodeutils.safe_decode(resp.body)) - self.assertEqual(http_client.MULTIPLE_CHOICES, resp.status_code) - self.assertEqual('application/json', resp.content_type) diff --git a/senlin/tests/unit/api/openstack/v1/__init__.py b/senlin/tests/unit/api/openstack/v1/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/api/openstack/v1/test_actions.py b/senlin/tests/unit/api/openstack/v1/test_actions.py deleted file mode 100644 index 88daf92e1..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_actions.py +++ /dev/null @@ -1,486 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
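Every test in the controller files that follow uses stacked mock.patch.object decorators, and the argument order is easy to misread: decorators apply bottom-up, so the patch written closest to the test method supplies the first injected mock, while the class-level patch of policy.enforce arrives last — hence signatures like (self, mock_call, mock_parse, mock_enforce). A self-contained illustration of that ordering (toy names, unrelated to Senlin's code):

from unittest import mock


class Thing:
    @staticmethod
    def first():
        return 'real-first'

    @staticmethod
    def second():
        return 'real-second'


@mock.patch.object(Thing, 'first')   # outer patch -> injected last
@mock.patch.object(Thing, 'second')  # inner patch -> injected first
def run(mock_second, mock_first):
    # Both attributes are replaced by MagicMocks while run() executes.
    assert Thing.second is mock_second
    assert Thing.first is mock_first


run()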
- -import copy -from unittest import mock - -from webob import exc - -from oslo_serialization import jsonutils - -from senlin.api.common import util -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import actions -from senlin.common import exception as senlin_exc -from senlin.common import policy -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class ActionControllerTest(shared.ControllerTest, base.SenlinTestCase): - """Tests the API class which acts as the WSGI controller.""" - - def setUp(self): - super(ActionControllerTest, self).setUp() - - # Create WSGI controller instance - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = actions.ActionController(options=cfgopts) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/actions') - - engine_resp = [ - { - 'action': 'NODE_CREATE', - 'cause': 'RPC_Request', - 'cluster_id': 'CLUSTER_FAKE_ID', - 'depended_by': [], - 'depends_on': [], - 'end_time': 1425555000.0, - 'id': '2366d400-c7e3-4961-09254-6d1c3f7ac167', - 'inputs': {}, - 'interval': -1, - 'name': 'node_create_0df0931b', - 'outputs': {}, - 'owner': None, - 'start_time': 1425550000.0, - 'status': 'SUCCEEDED', - 'status_reason': 'Action completed successfully.', - 'target': '0df0931b-e251-4f2e-8719-4effda3627ba', - 'timeout': 3600 - } - ] - - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - self.assertEqual(engine_resp, result['actions']) - mock_parse.assert_called_once_with( - 'ActionListRequest', req, {'project_safe': True}) - mock_call.assert_called_once_with( - req.context, 'action_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_without_cluster_id(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/actions', version='1.13') - - engine_resp = [ - { - 'action': 'NODE_CREATE', - 'cause': 'RPC_Request', - 'cluster_id': 'CLUSTER_FAKE_ID', - 'depended_by': [], - 'depends_on': [], - 'end_time': 1425555000.0, - 'id': '2366d400-c7e3-4961-09254-6d1c3f7ac167', - 'inputs': {}, - 'interval': -1, - 'name': 'node_create_0df0931b', - 'outputs': {}, - 'owner': None, - 'start_time': 1425550000.0, - 'status': 'SUCCEEDED', - 'status_reason': 'Action completed successfully.', - 'target': '0df0931b-e251-4f2e-8719-4effda3627ba', - 'timeout': 3600 - } - ] - - mock_call.return_value = copy.deepcopy(engine_resp) - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - # list call for version < 1.14 should have cluster_id field removed - # remove cluster_id field from expected response - engine_resp[0].pop('cluster_id') - - self.assertEqual(engine_resp, result['actions']) - mock_parse.assert_called_once_with( - 'ActionListRequest', req, {'project_safe': True}) - mock_call.assert_called_once_with( - req.context, 'action_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_with_cluster_id(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 
'index', True) - req = self._get('/actions', version='1.14') - - engine_resp = [ - { - 'action': 'NODE_CREATE', - 'cause': 'RPC_Request', - 'cluster_id': 'CLUSTER_FAKE_ID', - 'depended_by': [], - 'depends_on': [], - 'end_time': 1425555000.0, - 'id': '2366d400-c7e3-4961-09254-6d1c3f7ac167', - 'inputs': {}, - 'interval': -1, - 'name': 'node_create_0df0931b', - 'outputs': {}, - 'owner': None, - 'start_time': 1425550000.0, - 'status': 'SUCCEEDED', - 'status_reason': 'Action completed successfully.', - 'target': '0df0931b-e251-4f2e-8719-4effda3627ba', - 'timeout': 3600 - } - ] - - mock_call.return_value = copy.deepcopy(engine_resp) - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - self.assertEqual(engine_resp, result['actions']) - mock_parse.assert_called_once_with( - 'ActionListRequest', req, {'project_safe': True}) - mock_call.assert_called_once_with( - req.context, 'action_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_whitelists_params(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - marker_uuid = '8216a86c-1bdc-442e-b493-329385d37cbc' - params = { - 'cluster_id': 'CLUSTER_FAKE_ID', - 'name': 'NODE_CREATE', - 'status': 'SUCCEEDED', - 'limit': 10, - 'marker': marker_uuid, - 'sort': 'status', - 'global_project': True, - } - req = self._get('/actions', params=params) - - mock_call.return_value = [] - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - self.assertEqual([], result['actions']) - mock_parse.assert_called_once_with( - 'ActionListRequest', req, - { - 'cluster_id': ['CLUSTER_FAKE_ID'], - 'status': ['SUCCEEDED'], - 'sort': 'status', - 'name': ['NODE_CREATE'], - 'limit': '10', - 'marker': marker_uuid, - 'project_safe': False - }) - mock_call.assert_called_once_with( - req.context, 'action_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_whitelists_invalid_params(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = { - 'balrog': 'you shall not pass!', - } - req = self._get('/actions', params=params) - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("Invalid parameter balrog", - str(ex)) - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_with_bad_schema(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = {'status': 'fake'} - req = self._get('/actions', params=params) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ActionListRequest', req, mock.ANY) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_limit_not_int(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = {'limit': 'not-int'} - req = self._get('/actions', params=params) - - mock_parse.side_effect = exc.HTTPBadRequest("bad limit") - ex = 
self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("bad limit", str(ex)) - mock_parse.assert_called_once_with( - 'ActionListRequest', req, mock.ANY) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_global_project_true(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = {'global_project': 'True'} - req = self._get('/actions', params=params) - - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = [] - - result = self.controller.index(req) - - self.assertEqual([], result['actions']) - mock_parse.assert_called_once_with( - 'ActionListRequest', req, {'project_safe': False}) - mock_call.assert_called_once_with( - req.context, 'action_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_global_project_false(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = {'global_project': 'False'} - req = self._get('/actions', params=params) - - obj = mock.Mock() - mock_parse.return_value = obj - error = senlin_exc.Forbidden() - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req) - - self.assertEqual(403, resp.json['code']) - self.assertEqual('Forbidden', resp.json['error']['type']) - mock_parse.assert_called_once_with( - "ActionListRequest", mock.ANY, {'project_safe': True}) - mock_call.assert_called_once_with(req.context, 'action_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_index_global_project_not_bool(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = {'global_project': 'No'} - req = self._get('/actions', params=params) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("Invalid value 'No' specified for 'global_project'", - str(ex)) - self.assertFalse(mock_call.called) - self.assertFalse(mock_parse.called) - - def test_action_index_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', False) - req = self._get('/actions') - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_get_success(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - action_id = 'aaaa-bbbb-cccc' - req = self._get('/actions/%(action_id)s' % {'action_id': action_id}) - - engine_resp = { - 'action': 'NODE_CREATE', - 'cause': 'RPC_Request', - 'cluster_id': 'CLUSTER_FAKE_ID', - 'depended_by': [], - 'depends_on': [], - 'end_time': 1425555000.0, - 'id': '2366d400-c7e3-4961-09254-6d1c3f7ac167', - 'inputs': {}, - 'interval': -1, - 'name': 'node_create_0df0931b', - 'outputs': {}, - 'owner': None, - 'start_time': 1425550000.0, - 'status': 'SUCCEEDED', - 'status_reason': 'Action completed successfully.', - 'target': '0df0931b-e251-4f2e-8719-4effda3627ba', - 'timeout': 3600 - } - - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = engine_resp - - 
response = self.controller.get(req, action_id=action_id) - - self.assertEqual(engine_resp, response['action']) - - mock_parse.assert_called_once_with( - 'ActionGetRequest', req, {'identity': action_id}) - mock_call.assert_called_once_with( - req.context, 'action_get', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_get_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - action_id = 'non-existent-action' - req = self._get('/actions/%(action_id)s' % {'action_id': action_id}) - - obj = mock.Mock() - mock_parse.return_value = obj - error = senlin_exc.ResourceNotFound(type='action', id=action_id) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, action_id=action_id) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - mock_parse.assert_called_once_with( - 'ActionGetRequest', mock.ANY, {'identity': action_id}) - mock_call.assert_called_once_with( - req.context, 'action_get', obj) - - def test_action_get_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', False) - action_id = 'non-existent-action' - req = self._get('/actions/%(action_id)s' % {'action_id': action_id}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, action_id=action_id) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_update_cancel(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - aid = 'xxxx-yyyy-zzzz' - body = { - 'action': { - 'status': 'CANCELLED' - } - } - - req = self._patch('/actions/%(action_id)s' % {'action_id': aid}, - jsonutils.dumps(body), version='1.12') - obj = mock.Mock() - mock_parse.return_value = obj - - self.assertRaises(exc.HTTPAccepted, - self.controller.update, req, - action_id=aid, body=body) - - mock_parse.assert_called_once_with( - 'ActionUpdateRequest', req, - { - 'identity': aid, - 'status': 'CANCELLED', - 'force': False - }) - mock_call.assert_called_once_with(req.context, 'action_update', obj) - - @mock.patch.object(util, 'parse_bool_param') - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_update_force_cancel(self, mock_call, mock_parse, - mock_parse_bool, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - aid = 'xxxx-yyyy-zzzz' - body = { - 'action': { - 'status': 'CANCELLED' - } - } - params = {'force': 'True'} - req = self._patch( - '/actions/%(action_id)s' % {'action_id': aid}, - jsonutils.dumps(body), version='1.12', params=params) - obj = mock.Mock() - mock_parse.return_value = obj - mock_parse_bool.return_value = True - - self.assertRaises(exc.HTTPAccepted, - self.controller.update, req, - action_id=aid, body=body) - - mock_parse.assert_called_once_with( - 'ActionUpdateRequest', req, - { - 'identity': aid, - 'status': 'CANCELLED', - 'force': True - }) - mock_call.assert_called_once_with(req.context, 'action_update', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_update_invalid(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 
'update', True) - aid = 'xxxx-yyyy-zzzz' - body = {'status': 'FOO'} - - req = self._patch('/actions/%(action_id)s' % {'action_id': aid}, - jsonutils.dumps(body), version='1.12') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, req, - action_id=aid, body=body) - - self.assertEqual("Malformed request data, missing 'action' key " - "in request body.", str(ex)) - - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) diff --git a/senlin/tests/unit/api/openstack/v1/test_buildinfo.py b/senlin/tests/unit/api/openstack/v1/test_buildinfo.py deleted file mode 100644 index d4d8ef0d2..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_buildinfo.py +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import build_info -from senlin.common import policy -from senlin.objects.requests import build_info as vorb -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class BuildInfoControllerTest(shared.ControllerTest, base.SenlinTestCase): - - def setUp(self): - super(BuildInfoControllerTest, self).setUp() - self.controller = build_info.BuildInfoController({}) - - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_default_build_revision(self, mock_call, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'build_info', True) - req = self._get('/build_info') - - mock_call.return_value = '12.34' - - result = self.controller.build_info(req) - response = result['build_info'] - self.assertIn('api', response) - self.assertIn('engine', response) - self.assertIn('revision', response['api']) - self.assertEqual('1.0', response['api']['revision']) - self.assertIn('revision', response['engine']) - self.assertEqual('12.34', response['engine']['revision']) - - mock_call.assert_called_once_with(req.context, - 'get_revision', mock.ANY) - request = mock_call.call_args[0][2] - self.assertIsInstance(request, vorb.GetRevisionRequest) - - @mock.patch.object(build_info.cfg, 'CONF') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_response_api_build_revision_from_config_file( - self, mock_call, mock_conf, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'build_info', True) - req = self._get('/build_info') - mock_call.return_value = 'engine_revision' - mock_conf.revision = {'senlin_api_revision': 'test'} - - result = self.controller.build_info(req) - response = result['build_info'] - self.assertEqual('test', response['api']['revision']) - mock_call.assert_called_once_with(req.context, - 'get_revision', mock.ANY) - request = mock_call.call_args[0][2] - self.assertIsInstance(request, vorb.GetRevisionRequest) - - def test_build_info_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'build_info', False) - req = self._get('/build_info') - - resp = 
shared.request_with_middleware(fault.FaultWrapper, - self.controller.build_info, - req) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) diff --git a/senlin/tests/unit/api/openstack/v1/test_cluster_policies.py b/senlin/tests/unit/api/openstack/v1/test_cluster_policies.py deleted file mode 100644 index 21b733a37..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_cluster_policies.py +++ /dev/null @@ -1,257 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from webob import exc - -from senlin.api.common import util -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import cluster_policies as cp_mod -from senlin.common import exception as senlin_exc -from senlin.common import policy -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class ClusterPolicyControllerTest(shared.ControllerTest, base.SenlinTestCase): - """Tests the API class which acts as the WSGI controller.""" - - def setUp(self): - super(ClusterPolicyControllerTest, self).setUp() - - # Create WSGI controller instance - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = cp_mod.ClusterPolicyController(options=cfgopts) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_cluster_policy_index(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - cid = 'test_cluster' - req = self._get('/cluster_policies/%s' % cid) - - engine_resp = [ - { - 'id': 'fake_id', - 'cluster_id': 'fake cluster id', - 'policy_id': 'fake policy id', - 'enabled': True, - 'data': {}, - 'cluster_name': 'test_cluster', - 'policy_name': 'test_policy', - 'policy_type': 'ScalingPolicy', - } - ] - - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req, cluster_id=cid) - - self.assertEqual(engine_resp, result['cluster_policies']) - mock_parse.assert_called_once_with( - 'ClusterPolicyListRequest', req, mock.ANY) - mock_call.assert_called_once_with( - req.context, 'cluster_policy_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_cluster_policy_index_with_params(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - cid = 'FAKE_CLUSTER' - params = { - 'sort': 'enabled', - 'enabled': 'True', - } - req = self._get('/cluster_policies/%s' % cid, params=params) - mock_call.return_value = [] - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req, cluster_id=cid) - - self.assertEqual([], result['cluster_policies']) - mock_parse.assert_called_once_with( - 'ClusterPolicyListRequest', req, - { - 'sort': 'enabled', - 'enabled': True, - 'identity': 'FAKE_CLUSTER' - }) - 
mock_call.assert_called_once_with( - req.context, 'cluster_policy_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_cluster_policy_index_invalid_params(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - cid = 'FAKE_CLUSTER' - params = { - 'enabled': 'True', - 'balrog': 'you shall not pass!' - } - req = self._get('/cluster_policies/%s' % cid, params=params) - - ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index, - req, cluster_id=cid) - self.assertEqual('Invalid parameter balrog', - str(ex)) - self.assertEqual(0, mock_parse.call_count) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_cluster_policy_index_invalid_sort(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - cid = 'FAKE_CLUSTER' - params = { - 'enabled': 'True', - 'sort': 'bad sort' - } - req = self._get('/cluster_policies/%s' % cid, params=params) - - mock_parse.side_effect = exc.HTTPBadRequest("bad sort") - ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index, - req, cluster_id=cid) - - self.assertEqual("bad sort", str(ex)) - mock_parse.assert_called_once_with( - 'ClusterPolicyListRequest', req, mock.ANY) - self.assertEqual(0, mock_call.call_count) - - def test_cluster_policy_index_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', False) - cid = 'FAKE_CLUSTER' - req = self._get('/cluster_policy/%s' % cid) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req, cluster_id=cid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_cluster_policy_get_success(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - cid = 'FAKE_CLUSTER' - pid = 'FAKE_POLICY' - req = self._get('/cluster_policies/%(cid)s/%(pid)s' - '' % {'cid': cid, 'pid': pid}) - - engine_resp = { - 'id': 'fake_id', - 'cluster_id': cid, - 'policy_id': pid, - 'enabled': True, - 'data': {}, - 'cluster_name': 'test_cluster', - 'policy_name': 'test_policy', - 'policy_type': 'ScalingPolicy', - } - - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = engine_resp - - response = self.controller.get(req, cluster_id=cid, policy_id=pid) - - self.assertEqual(engine_resp, response['cluster_policy']) - mock_parse.assert_called_once_with( - 'ClusterPolicyGetRequest', req, - { - 'identity': cid, - 'policy_id': pid - }) - mock_call.assert_called_once_with( - req.context, 'cluster_policy_get', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_cluster_policy_get_not_found(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - cid = 'FAKE_CLUSTER' - pid = 'FAKE_POLICY' - req = self._get('/cluster_policies/%(cid)s/%(pid)s' - '' % {'cid': cid, 'pid': pid}) - - error = senlin_exc.PolicyBindingNotFound(policy=pid, identity=cid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, cluster_id=cid, - policy_id=pid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('PolicyBindingNotFound', 
resp.json['error']['type']) - - mock_parse.assert_called_once_with( - 'ClusterPolicyGetRequest', mock.ANY, - { - 'identity': 'FAKE_CLUSTER', - 'policy_id': 'FAKE_POLICY' - }) - mock_call.assert_called_once_with( - req.context, 'cluster_policy_get', mock.ANY) - - def test_action_get_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', False) - cid = 'FAKE_CLUSTER' - pid = 'FAKE_POLICY' - req = self._get('/cluster_policies/%(cid)s/%(pid)s' - '' % {'cid': cid, 'pid': pid}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, cluster_id=cid, - policy_id=pid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_action_get_bad_params(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - cid = 'FAKE_CLUSTER' - pid = ['Fake'] - req = self._get('/cluster_policies/%(cid)s/%(pid)s' - '' % {'cid': cid, 'pid': pid}) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, self.controller.get, - req, cluster_id=cid, policy_id=pid) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ClusterPolicyGetRequest', req, - { - 'identity': cid, - 'policy_id': pid - }) - self.assertEqual(0, mock_call.call_count) diff --git a/senlin/tests/unit/api/openstack/v1/test_clusters.py b/senlin/tests/unit/api/openstack/v1/test_clusters.py deleted file mode 100644 index f260e577b..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_clusters.py +++ /dev/null @@ -1,1531 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
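The cluster tests below repeatedly exercise the fault-wrapping path: an engine exception is converted with shared.to_remote_error(), installed as the RPC side effect, and the request is replayed through fault.FaultWrapper, after which the test reads resp.json['code'] and resp.json['error']['type']. A loose sketch of that translation under invented names — the removed middleware in senlin/api/middleware/fault.py derived the code from the exception class rather than from a lookup table like this one:

import json

# Assumed mapping for illustration only; it mirrors the codes the tests
# below assert (400 BadRequest, 403 Forbidden, 404 ResourceNotFound).
HTTP_CODES = {'BadRequest': 400, 'Forbidden': 403, 'ResourceNotFound': 404}


def to_fault(exc_type_name, message):
    """Render a remote engine error as the JSON fault body tests inspect."""
    code = HTTP_CODES.get(exc_type_name, 500)
    body = {'code': code,
            'error': {'type': exc_type_name, 'message': message}}
    return code, json.dumps(body)


code, body = to_fault('ResourceNotFound', 'The cluster could not be found.')
assert code == 404
assert json.loads(body)['error']['type'] == 'ResourceNotFound'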
- -import copy -from unittest import mock - -from oslo_serialization import jsonutils -from oslo_utils import uuidutils -from webob import exc - -from senlin.api.common import util -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import clusters -from senlin.common import exception as senlin_exc -from senlin.common import policy -from senlin.objects.requests import clusters as vorc -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(policy, 'enforce') -class ClusterControllerTest(shared.ControllerTest, base.SenlinTestCase): - """Test case for the cluster controller.""" - - def setUp(self): - super(ClusterControllerTest, self).setUp() - - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = clusters.ClusterController(options=cfgopts) - self.context = utils.dummy_context() - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_index(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/clusters') - - engine_resp = [{'foo': 'bar'}] - mock_call.return_value = engine_resp - obj = vorc.ClusterListRequest() - mock_parse.return_value = obj - - result = self.controller.index(req) - - expected = {u'clusters': engine_resp} - self.assertEqual(expected, result) - - mock_parse.assert_called_once_with('ClusterListRequest', req, - {'project_safe': True}) - mock_call.assert_called_once_with(req.context, 'cluster_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_index_with_params(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - fake_id = uuidutils.generate_uuid() - params = { - 'name': 'name1', - 'status': 'ACTIVE', - 'limit': '3', - 'marker': fake_id, - 'sort': 'name:asc', - 'global_project': 'True', - } - req = self._get('/clusters', params=params) - obj = vorc.ClusterListRequest() - mock_parse.return_value = obj - engine_resp = [{'foo': 'bar'}] - mock_call.return_value = engine_resp - - result = self.controller.index(req) - - expected = {u'clusters': engine_resp} - self.assertEqual(expected, result) - mock_parse.assert_called_once_with( - 'ClusterListRequest', req, - { - 'name': ['name1'], - 'status': ['ACTIVE'], - 'limit': '3', - 'marker': fake_id, - 'sort': 'name:asc', - 'project_safe': False - }) - - mock_call.assert_called_once_with(req.context, 'cluster_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_index_failed_with_exception(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/clusters', params={}) - mock_parse.side_effect = exc.HTTPBadRequest("Boom") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - "ClusterListRequest", req, {'project_safe': True}) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_index_failed_engine_error(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = {'global_project': True} - req = self._get('/clusters', 
params=params) - obj = mock.Mock() - mock_parse.return_value = obj - error = senlin_exc.Forbidden() - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req) - - self.assertEqual(403, resp.json['code']) - self.assertEqual('Forbidden', resp.json['error']['type']) - mock_parse.assert_called_once_with( - "ClusterListRequest", mock.ANY, {'project_safe': False}) - mock_call.assert_called_once_with(req.context, 'cluster_list', obj) - - def test_index_error_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', False) - req = self._get('/clusters') - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_create(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = { - 'cluster': { - 'name': 'test_cluster', - 'desired_capacity': 0, - 'profile_id': 'xxxx-yyyy', - 'min_size': 0, - 'max_size': 0, - 'metadata': {}, - 'timeout': None, - } - } - req = self._post('/clusters', jsonutils.dumps(body)) - engine_response = { - 'id': 'FAKE_ID', - 'name': 'test_cluster', - 'desired_capacity': 0, - 'profile_id': 'xxxx-yyyy', - 'min_size': 0, - 'max_size': 0, - 'metadata': {}, - 'timeout': 60, - 'action': 'fake_action' - } - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - resp = self.controller.create(req, body=body) - - self.assertEqual(engine_response, resp['cluster']) - self.assertEqual('/actions/fake_action', resp['location']) - mock_parse.assert_called_once_with( - "ClusterCreateRequest", mock.ANY, body, 'cluster') - mock_call.assert_called_once_with(req.context, 'cluster_create', - obj.cluster) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_create_failed_request(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = {'foo': 'bar'} - req = self._post('/clusters', jsonutils.dumps(body)) - mock_parse.side_effect = exc.HTTPBadRequest('Boom') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("Boom", str(ex)) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_create_failed_engine(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = {'foo': 'bar'} - req = self._post('/clusters', jsonutils.dumps(body)) - obj = mock.Mock() - mock_parse.return_value = obj - error = senlin_exc.BadRequest(msg='bad') - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.create, - req, body=body) - - self.assertEqual(400, resp.json['code']) - self.assertEqual('BadRequest', resp.json['error']['type']) - mock_parse.assert_called_once_with( - "ClusterCreateRequest", mock.ANY, {'foo': 'bar'}, 'cluster') - mock_call.assert_called_once_with(req.context, 'cluster_create', - obj.cluster) - - def test_create_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', False) - body = { - 'cluster': { - 'name': 'test_cluster', - 'profile_id': 
'xxxx-yyyy', - } - } - req = self._post('/clusters', jsonutils.dumps(body)) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.create, - req, body=body) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_get(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - cid = 'cid' - req = self._get('/clusters/%s' % cid) - engine_resp = {'foo': 'bar'} - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.get(req, cluster_id=cid) - - self.assertEqual({'cluster': {'foo': 'bar'}}, response) - mock_parse.assert_called_once_with( - "ClusterGetRequest", req, {'identity': 'cid'}) - mock_call.assert_called_once_with(req.context, 'cluster_get', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_get_failed_request(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - cid = 'FAKE_ID' - req = self._get('/clusters/%s' % cid) - mock_parse.side_effect = exc.HTTPBadRequest("Boom") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.get, - req, cluster_id=cid) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - "ClusterGetRequest", req, {'identity': cid}) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_get_failed_engine(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - cid = 'non-existent-cluster' - req = self._get('/clusters/%s' % cid) - - error = senlin_exc.ResourceNotFound(type='cluster', id=cid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, cluster_id=cid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - mock_parse.assert_called_once_with( - "ClusterGetRequest", mock.ANY, {'identity': cid}) - - def test_get_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', False) - cid = 'cid' - req = self._get('/clusters/%s' % cid) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, cluster_id=cid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_update(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - cid = 'aaaa-bbbb-cccc' - body = {'cluster': {'foo': 'bar'}} - engine_resp = { - 'id': cid, - 'action': 'fake_action', - } - req = self._patch('/clusters/%s' % cid, jsonutils.dumps(body)) - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - res = self.controller.update(req, cluster_id=cid, body=body) - - self.assertEqual( - {'cluster': {'id': cid}, 'location': '/actions/fake_action'}, - res) - mock_parse.assert_called_once_with( - "ClusterUpdateRequest", req, - {'identity': 'aaaa-bbbb-cccc', 'foo': 'bar'}) - mock_call.assert_called_once_with(req.context, 'cluster_update', obj) - - def test_update_missing_cluster_key(self, mock_enforce): - 
self._mock_enforce_setup(mock_enforce, 'update', True) - cid = 'aaaa-bbbb-cccc' - body = {'profile_id': 'xxxx-yyyy-zzzz'} - req = self._patch('/clusters/%s' % cid, jsonutils.dumps(body)) - mock_call = self.patchobject(rpc_client.EngineClient, 'call') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, cluster_id=cid, body=body) - - self.assertIn("Malformed request data, missing 'cluster' key " - "in request body.", str(ex)) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_update_failed_request(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - cid = 'aaaa-bbbb-cccc' - body = {'cluster': {'name': 'foo bar'}} - req = self._patch('/clusters/%s' % cid, jsonutils.dumps(body)) - mock_parse.side_effect = exc.HTTPBadRequest('Boom') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, cluster_id=cid, body=body) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - "ClusterUpdateRequest", req, - {'identity': 'aaaa-bbbb-cccc', 'name': 'foo bar'}) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_update_engine_error(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - cid = 'non-existent-cluster' - body = {'cluster': {'profile_id': 'xxxx-yyyy-zzzz'}} - req = self._patch('/clusters/%s' % cid, jsonutils.dumps(body)) - obj = mock.Mock() - mock_parse.return_value = obj - error = senlin_exc.ResourceNotFound(type='cluster', id=cid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.update, - req, cluster_id=cid, body=body) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - mock_parse.assert_called_once_with( - "ClusterUpdateRequest", mock.ANY, - {'identity': cid, 'profile_id': 'xxxx-yyyy-zzzz'}) - mock_call.assert_called_once_with(req.context, 'cluster_update', obj) - - def test_update_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', False) - cid = 'aaaa-bbbb-cccc' - body = {'cluster': {'profile_id': 'xxxx-yyyy-zzzz'}} - - req = self._patch('/clusters/%(cluster_id)s' % {'cluster_id': cid}, - jsonutils.dumps(body)) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.update, - req, cluster_id=cid, body=body) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_add_nodes(self, mock_call, mock_parse, mock_enforce): - req = mock.Mock() - cid = 'FAKE_ID' - data = dict(nodes=['NODE1']) - mock_call.return_value = {'action': 'action-id'} - obj = mock.Mock() - mock_parse.return_value = obj - - resp = self.controller._do_add_nodes(req, cid, data) - - self.assertEqual({'action': 'action-id'}, resp) - mock_parse.assert_called_once_with( - 'ClusterAddNodesRequest', - req, - {'identity': cid, 'nodes': data['nodes']} - ) - mock_call.assert_called_once_with( - req.context, 'cluster_add_nodes', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_add_nodes_failed_request(self, mock_call, - mock_parse, _ignore): - req = mock.Mock() 
- cid = 'aaaa-bbbb-cccc' - data = dict(nodes=['NODE2']) - mock_parse.side_effect = exc.HTTPBadRequest('Boom') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_add_nodes, - req, cid, data) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - 'ClusterAddNodesRequest', - req, - {'identity': cid, 'nodes': data['nodes']} - ) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_add_nodes_failed_engine(self, mock_call, mock_parse, _ignore): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(nodes=['NODE3']) - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.side_effect = senlin_exc.BadRequest(msg='Boom') - - ex = self.assertRaises(senlin_exc.BadRequest, - self.controller._do_add_nodes, - req, cid, data) - - mock_parse.assert_called_once_with( - 'ClusterAddNodesRequest', - req, - {'identity': cid, 'nodes': data['nodes']} - ) - self.assertEqual("Boom.", str(ex)) - mock_call.assert_called_once_with( - req.context, 'cluster_add_nodes', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_del_nodes(self, mock_call, mock_parse, _ignore): - req = mock.Mock() - cid = 'FAKE_ID' - data = dict(nodes=['NODE4'], destroy=False) - mock_call.return_value = {'action': 'action-id'} - obj = mock.Mock() - mock_parse.return_value = obj - - resp = self.controller._do_del_nodes(req, cid, data) - - self.assertEqual({'action': 'action-id'}, resp) - mock_parse.assert_called_once_with( - 'ClusterDelNodesRequest', req, {'identity': cid, - 'nodes': data['nodes'], - 'destroy_after_deletion': False}) - mock_call.assert_called_once_with( - req.context, 'cluster_del_nodes', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_del_nodes_failed_request(self, mock_call, - mock_parse, _ignore): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(nodes=['NODE5'], destroy=False) - mock_parse.side_effect = exc.HTTPBadRequest('Boom') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_del_nodes, - req, cid, data) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - 'ClusterDelNodesRequest', req, {'identity': cid, - 'nodes': data['nodes'], - 'destroy_after_deletion': False}) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_del_nodes_failed_engine(self, mock_call, mock_parse, _ignore): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(nodes=['NODE6'], destroy=False) - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.side_effect = senlin_exc.BadRequest(msg='Boom') - - ex = self.assertRaises(senlin_exc.BadRequest, - self.controller._do_del_nodes, - req, cid, data) - - mock_parse.assert_called_once_with( - 'ClusterDelNodesRequest', req, {'identity': cid, - 'nodes': data['nodes'], - 'destroy_after_deletion': False}) - self.assertEqual("Boom.", str(ex)) - mock_call.assert_called_once_with( - req.context, 'cluster_del_nodes', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_replace_nodes(self, mock_call, mock_parse, _ignore): - req = mock.Mock() - cid = 'FAKE_ID' - data = dict(nodes={'OLD': 'NEW'}) - mock_call.return_value = {'action': 'action-id'} - obj = mock.Mock() - mock_parse.return_value = obj - - resp = 
self.controller._do_replace_nodes(req, cid, data) - - self.assertEqual({'action': 'action-id'}, resp) - mock_parse.assert_called_once_with( - 'ClusterReplaceNodesRequest', - req, - {'identity': cid, 'nodes': data['nodes']} - ) - mock_call.assert_called_once_with( - req.context, 'cluster_replace_nodes', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_replace_nodes_none(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(nodes=None) - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_replace_nodes, - req, cid, data) - - self.assertEqual("The data provided is not a map", str(ex)) - self.assertEqual(0, mock_parse.call_count) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_replace_nodes_failed_request(self, mock_call, - mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(nodes={'OLD': 'NEW'}) - mock_parse.side_effect = exc.HTTPBadRequest('Boom') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_replace_nodes, - req, cid, data) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - 'ClusterReplaceNodesRequest', - req, - {'identity': cid, 'nodes': data['nodes']} - ) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_replace_nodes_failed_engine(self, mock_call, - mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(nodes={'OLD': 'NEW'}) - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.side_effect = senlin_exc.BadRequest(msg='Boom') - - ex = self.assertRaises(senlin_exc.BadRequest, - self.controller._do_replace_nodes, - req, cid, data) - - mock_parse.assert_called_once_with( - 'ClusterReplaceNodesRequest', - req, - {'identity': cid, 'nodes': data['nodes']} - ) - self.assertEqual("Boom.", str(ex)) - mock_call.assert_called_once_with( - req.context, 'cluster_replace_nodes', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def _test_do_resize_with_type(self, adj_type, mock_call, mock_parse): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = { - 'adjustment_type': adj_type, - 'number': 1, - 'min_size': 0, - 'max_size': 10, - 'min_step': 1, - 'strict': True - } - mock_call.return_value = {'action': 'action-id'} - # We are using a real object for testing - obj = vorc.ClusterResizeRequest(identity=cid, **data) - mock_parse.return_value = obj - - resp = self.controller._do_resize(req, cid, data) - - self.assertEqual({'action': 'action-id'}, resp) - params = copy.deepcopy(data) - if params['adjustment_type'] != 'CHANGE_IN_PERCENTAGE': - params.pop('min_step') - params['identity'] = cid - mock_parse.assert_called_once_with( - 'ClusterResizeRequest', req, params) - mock_call.assert_called_once_with(req.context, 'cluster_resize', obj) - - def test_do_resize_exact_capacity(self, mock_enforce): - self._test_do_resize_with_type('EXACT_CAPACITY') - - def test_do_resize_with_change_capacity(self, mock_enforce): - self._test_do_resize_with_type('CHANGE_IN_CAPACITY') - - def test_do_resize_with_change_percentage(self, mock_enforce): - self._test_do_resize_with_type('CHANGE_IN_PERCENTAGE') - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def 
test_do_resize_failed_request(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = {'adjustment_type': 'EXACT_CAPACITY', 'number': 10} - mock_parse.side_effect = exc.HTTPBadRequest('Boom') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_resize, - req, cid, data) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - 'ClusterResizeRequest', req, - { - 'identity': cid, - 'adjustment_type': 'EXACT_CAPACITY', - 'number': 10 - }) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_resize_missing_number(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = {'adjustment_type': 'EXACT_CAPACITY'} - obj = vorc.ClusterResizeRequest(identity=cid, **data) - mock_parse.return_value = obj - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_resize, - req, cid, data) - - self.assertEqual('Missing number value for size adjustment.', - str(ex)) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_resize_missing_type(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = {'number': 2} - obj = vorc.ClusterResizeRequest(identity=cid, **data) - mock_parse.return_value = obj - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_resize, - req, cid, data) - - self.assertEqual("Missing adjustment_type value for size adjustment.", - str(ex)) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_resize_max_size_too_small(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = {'min_size': 2, 'max_size': 1} - obj = vorc.ClusterResizeRequest(identity=cid, **data) - mock_parse.return_value = obj - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_resize, - req, cid, data) - - self.assertEqual("The specified min_size (2) is greater than " - "the specified max_size (1).", str(ex)) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_resize_empty_params(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = {} - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_resize, - req, cid, data) - - self.assertEqual("Not enough parameters to do resize action.", - str(ex)) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_resize_failed_engine(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = {'max_size': 200} - obj = vorc.ClusterResizeRequest(identity=cid, **data) - mock_parse.return_value = obj - mock_call.side_effect = senlin_exc.BadRequest(msg='Boom') - - ex = self.assertRaises(senlin_exc.BadRequest, - self.controller._do_resize, - req, cid, data) - - mock_parse.assert_called_once_with( - 'ClusterResizeRequest', req, {'identity': cid, 'max_size': 200}) - self.assertEqual("Boom.", str(ex)) - mock_call.assert_called_once_with( - req.context, 'cluster_resize', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_scale_out(self, mock_call, 
mock_parse, _ignore): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(count=1) - mock_call.return_value = {'action': 'action-id'} - obj = mock.Mock() - mock_parse.return_value = obj - - resp = self.controller._do_scale_out(req, cid, data) - - self.assertEqual({'action': 'action-id'}, resp) - mock_parse.assert_called_once_with( - 'ClusterScaleOutRequest', - req, - {'identity': cid, 'count': data['count']} - ) - mock_call.assert_called_once_with( - req.context, 'cluster_scale_out', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_scale_out_failed_request(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(count=2) - mock_parse.side_effect = exc.HTTPBadRequest('Boom') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_scale_out, - req, cid, data) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - 'ClusterScaleOutRequest', - req, - {'identity': cid, 'count': data['count']} - ) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_scale_out_failed_engine(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(count=3) - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.side_effect = senlin_exc.BadRequest(msg='Boom') - - ex = self.assertRaises(senlin_exc.BadRequest, - self.controller._do_scale_out, - req, cid, data) - - mock_parse.assert_called_once_with( - 'ClusterScaleOutRequest', - req, - {'identity': cid, 'count': data['count']} - ) - self.assertEqual("Boom.", str(ex)) - mock_call.assert_called_once_with( - req.context, 'cluster_scale_out', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_scale_in(self, mock_call, mock_parse, _ignore): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(count=4) - mock_call.return_value = {'action': 'action-id'} - obj = mock.Mock() - mock_parse.return_value = obj - - resp = self.controller._do_scale_in(req, cid, data) - - self.assertEqual({'action': 'action-id'}, resp) - mock_parse.assert_called_once_with( - 'ClusterScaleInRequest', - req, - {'identity': cid, 'count': data['count']} - ) - mock_call.assert_called_once_with( - req.context, 'cluster_scale_in', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_scale_in_failed_request(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(count=5) - mock_parse.side_effect = exc.HTTPBadRequest('Boom') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller._do_scale_in, - req, cid, data) - - self.assertEqual("Boom", str(ex)) - mock_parse.assert_called_once_with( - 'ClusterScaleInRequest', - req, - {'identity': cid, 'count': data['count']} - ) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_do_scale_in_failed_engine(self, mock_call, mock_parse, _ign): - req = mock.Mock() - cid = 'aaaa-bbbb-cccc' - data = dict(count=6) - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.side_effect = senlin_exc.BadRequest(msg='Boom') - - ex = self.assertRaises(senlin_exc.BadRequest, - self.controller._do_scale_in, - req, cid, data) - - mock_parse.assert_called_once_with( - 'ClusterScaleInRequest', - req, - {'identity': 
-        )
-        self.assertEqual("Boom.", str(ex))
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_scale_in', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_attach(self, mock_call, mock_parse, _ign):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        mock_call.return_value = {'action': 'action-id'}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        resp = self.controller._do_policy_attach(req, cid, data)
-
-        self.assertEqual({'action': 'action-id'}, resp)
-        mock_parse.assert_called_once_with(
-            'ClusterAttachPolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_policy_attach', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_attach_failed_request(self, mock_call, mock_parse, _i):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        mock_parse.side_effect = exc.HTTPBadRequest('Boom')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller._do_policy_attach,
-                               req, cid, data)
-
-        self.assertEqual("Boom", str(ex))
-        mock_parse.assert_called_once_with(
-            'ClusterAttachPolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_attach_failed_engine(self, mock_call, mock_parse, _i):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.side_effect = senlin_exc.BadRequest(msg='Boom')
-
-        ex = self.assertRaises(senlin_exc.BadRequest,
-                               self.controller._do_policy_attach,
-                               req, cid, data)
-
-        mock_parse.assert_called_once_with(
-            'ClusterAttachPolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        self.assertEqual("Boom.", str(ex))
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_policy_attach', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_detach(self, mock_call, mock_parse, _ign):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        mock_call.return_value = {'action': 'action-id'}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        resp = self.controller._do_policy_detach(req, cid, data)
-
-        self.assertEqual({'action': 'action-id'}, resp)
-        mock_parse.assert_called_once_with(
-            'ClusterDetachPolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_policy_detach', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_detach_failed_request(self, mock_call, mock_parse, _i):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        mock_parse.side_effect = exc.HTTPBadRequest('Boom')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller._do_policy_detach,
-                               req, cid, data)
-
-        self.assertEqual("Boom", str(ex))
-        mock_parse.assert_called_once_with(
-            'ClusterDetachPolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_detach_failed_engine(self, mock_call, mock_parse, _i):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.side_effect = senlin_exc.BadRequest(msg='Boom')
-
-        ex = self.assertRaises(senlin_exc.BadRequest,
-                               self.controller._do_policy_detach,
-                               req, cid, data)
-
-        mock_parse.assert_called_once_with(
-            'ClusterDetachPolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        self.assertEqual("Boom.", str(ex))
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_policy_detach', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_update(self, mock_call, mock_parse, _ign):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        mock_call.return_value = {'action': 'action-id'}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        resp = self.controller._do_policy_update(req, cid, data)
-
-        self.assertEqual({'action': 'action-id'}, resp)
-        mock_parse.assert_called_once_with(
-            'ClusterUpdatePolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_policy_update', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_update_failed_request(self, mock_call, mock_parse, _i):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        mock_parse.side_effect = exc.HTTPBadRequest('Boom')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller._do_policy_update,
-                               req, cid, data)
-
-        self.assertEqual("Boom", str(ex))
-        mock_parse.assert_called_once_with(
-            'ClusterUpdatePolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_policy_update_failed_engine(self, mock_call, mock_parse, _i):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'policy_id': 'xxxx-yyyy'}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.side_effect = senlin_exc.BadRequest(msg='Boom')
-
-        ex = self.assertRaises(senlin_exc.BadRequest,
-                               self.controller._do_policy_update,
-                               req, cid, data)
-
-        mock_parse.assert_called_once_with(
-            'ClusterUpdatePolicyRequest', req,
-            {'identity': cid, 'policy_id': 'xxxx-yyyy'})
-        self.assertEqual("Boom.", str(ex))
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_policy_update', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_check(self, mock_call, mock_parse, _ignore):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'op': 'value'}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        eng_resp = {'action': 'action-id'}
-        mock_call.return_value = eng_resp
-
-        resp = self.controller._do_check(req, cid, data)
-
-        self.assertEqual({'action': 'action-id'}, resp)
-        mock_parse.assert_called_once_with(
-            'ClusterCheckRequest', req,
-            {'identity': cid, 'params': {'op': 'value'}})
-        mock_call.assert_called_once_with(req.context, 'cluster_check', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_check_failed_request(self, mock_call, mock_parse, _ign):
-        data = {}
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        mock_parse.side_effect = exc.HTTPBadRequest('Boom')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller._do_check,
-                               req, cid, data)
-
-        self.assertEqual("Boom", str(ex))
-        mock_parse.assert_called_once_with(
-            'ClusterCheckRequest', req, {'identity': cid, 'params': {}})
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_check_failed_engine(self, mock_call, mock_parse, _i):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.side_effect = senlin_exc.BadRequest(msg='Boom')
-
-        ex = self.assertRaises(senlin_exc.BadRequest,
-                               self.controller._do_check,
-                               req, cid, data)
-
-        mock_parse.assert_called_once_with(
-            'ClusterCheckRequest', req, {'identity': cid, 'params': {}})
-        self.assertEqual("Boom.", str(ex))
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_check', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_recover(self, mock_call, mock_parse, _ignore):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {'op': 'value'}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        eng_resp = {'action': 'action-id'}
-        mock_call.return_value = eng_resp
-
-        resp = self.controller._do_recover(req, cid, data)
-
-        self.assertEqual({'action': 'action-id'}, resp)
-        mock_parse.assert_called_once_with(
-            'ClusterRecoverRequest', req,
-            {'identity': cid, 'params': {'op': 'value'}})
-        mock_call.assert_called_once_with(req.context, 'cluster_recover', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_recover_failed_request(self, mock_call, mock_parse, _ign):
-        data = {}
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        mock_parse.side_effect = exc.HTTPBadRequest('Boom')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller._do_recover,
-                               req, cid, data)
-
-        self.assertEqual("Boom", str(ex))
-        mock_parse.assert_called_once_with(
-            'ClusterRecoverRequest', req, {'identity': cid, 'params': {}})
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_do_recover_failed_engine(self, mock_call, mock_parse, _i):
-        req = mock.Mock()
-        cid = 'aaaa-bbbb-cccc'
-        data = {}
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.side_effect = senlin_exc.BadRequest(msg='Boom')
-
-        ex = self.assertRaises(senlin_exc.BadRequest,
-                               self.controller._do_recover,
-                               req, cid, data)
-
-        mock_parse.assert_called_once_with(
-            'ClusterRecoverRequest', req, {'identity': cid, 'params': {}})
-        self.assertEqual("Boom.", str(ex))
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_recover', obj)
-
-    def test_cluster_action_missing_action(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'action', True)
-        cid = 'aaaa-bbbb-cccc'
-        body = {}
-        req = self._post('/clusters/%s/actions' % cid, jsonutils.dumps(body))
-
-        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.action,
-                               req, cluster_id=cid, body=body)
-        self.assertEqual('No action specified', str(ex))
-        self.assertFalse(mock_call.called)
-
-    def test_cluster_action_multiple_actions(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'action', True)
-        cid = 'aaaa-bbbb-cccc'
-        body = {'action_1': {}, 'action_2': {}}
-        req = self._post('/clusters/%s/actions' % cid, jsonutils.dumps(body))
-
-        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.action,
-                               req, cluster_id=cid, body=body)
-        self.assertEqual('Multiple actions specified', str(ex))
-        self.assertFalse(mock_call.called)
-
-    def test_cluster_action_unsupported_action(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'action', True)
-        cid = 'aaaa-bbbb-cccc'
-        body = {'fly': None}
-        req = self._post('/clusters/%s/actions' % cid, jsonutils.dumps(body))
-
-        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.action,
-                               req, cluster_id=cid, body=body)
-        self.assertEqual("Unrecognized action 'fly' specified",
-                         str(ex))
-        self.assertFalse(mock_call.called)
-
-    def test_cluster_action_err_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'action', False)
-        cid = 'aaaa-bbbb-cccc'
-        body = {'someaction': {'param': 'value'}}
-
-        req = self._post('/clusters/%s/actions' % cid, jsonutils.dumps(body))
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.action,
-                                              req, cluster_id=cid, body=body)
-
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
-
-    def test_cluster_action_data_not_map(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'action', True)
-        cid = 'aaaa-bbbb-cccc'
-        body = {'resize': ['param1', 'param2']}
-
-        req = self._post('/clusters/%s/actions' % cid, jsonutils.dumps(body))
-
-        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.action,
-                               req, cluster_id=cid, body=body)
-        self.assertEqual('The data provided is not a map', str(ex))
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_collect(self, mock_call, mock_parse, mock_enforce):
-        req = mock.Mock(context=self.context)
-        cid = 'aaaa-bbbb-cccc'
-        path = 'foo.bar'
-        eng_resp = {'cluster_attributes': [{'key': 'value'}]}
-        mock_call.return_value = eng_resp
-        obj = vorc.ClusterResizeRequest(identity=cid, path=path)
-        mock_parse.return_value = obj
-
-        resp = self.controller.collect(req, cluster_id=cid, path=path)
-
-        self.assertEqual(eng_resp, resp)
-        mock_call.assert_called_once_with(req.context, 'cluster_collect', obj)
-
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_collect_version_mismatch(self, mock_call, mock_enforce):
-        # NOTE: we skip the mock_enforce setup below because api version check
-        # comes before the policy enforcement and the check fails in
-        # this test case.
-        cid = 'aaaa-bbbb-cccc'
-        path = 'foo.bar'
-        req = self._get('/clusters/%(cid)s/attrs/%(path)s' %
-                        {'cid': cid, 'path': path}, version='1.1')
-
-        ex = self.assertRaises(senlin_exc.MethodVersionNotFound,
-                               self.controller.collect,
-                               req, cluster_id=cid, path=path)
-
-        self.assertEqual(0, mock_call.call_count)
-        self.assertEqual("API version '1.1' is not supported on this method.",
-                         str(ex))
-
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_collect_path_not_provided(self, mock_call, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'collect', True)
-        req = mock.Mock(context=self.context)
-        cid = 'aaaa-bbbb-cccc'
-        path = ' '
-        req = self._get('/clusters/%(cid)s/attrs/%(path)s' %
-                        {'cid': cid, 'path': path}, version='1.2')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.collect,
-                               req, cluster_id=cid, path=path)
-
-        self.assertEqual('Required path attribute is missing.',
-                         str(ex))
-        self.assertEqual(0, mock_call.call_count)
-
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_collect_path_is_none(self, mock_call, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'collect', True)
-        req = mock.Mock(context=self.context)
-        cid = 'aaaa-bbbb-cccc'
-        path = 'None'
-        req = self._get('/clusters/%(cid)s/attrs/%(path)s' %
-                        {'cid': cid, 'path': path}, version='1.2')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.collect,
-                               req, cluster_id=cid, path=path)
-
-        self.assertEqual('Required path attribute is missing.',
-                         str(ex))
-        self.assertEqual(0, mock_call.call_count)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_collect_failed_request(self, mock_call, mock_parse,
-                                    mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'collect', True)
-        req = mock.Mock(context=self.context)
-        cid = 'aaaa-bbbb-cccc'
-        path = 'foo.bar'
-        mock_parse.side_effect = exc.HTTPBadRequest('Boom')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.collect,
-                               req, cluster_id=cid, path=path)
-
-        self.assertEqual("Boom", str(ex))
-        mock_parse.assert_called_once_with(
-            'ClusterCollectRequest', req, {'identity': cid, 'path': path})
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_collect_failed_engine(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'collect', True)
-        req = mock.Mock(context=self.context)
-        cid = 'aaaa-bbbb-cccc'
-        path = 'foo.bar'
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.side_effect = senlin_exc.BadRequest(msg='Boom')
-
-        ex = self.assertRaises(senlin_exc.BadRequest,
-                               self.controller.collect,
-                               req, cluster_id=cid, path=path)
-
-        mock_parse.assert_called_once_with(
-            'ClusterCollectRequest', req, {'identity': cid, 'path': path})
-        self.assertEqual("Boom.", str(ex))
-        mock_call.assert_called_once_with(
-            req.context, 'cluster_collect', obj)
-
-    def test_collect_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'collect', False)
-        cid = 'aaaa-bbbb-cccc'
-        path = 'foo.bar'
-        req = self._get('/clusters/%(cid)s/attrs/%(path)s' %
-                        {'cid': cid, 'path': path}, version='1.2')
-        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.collect,
-                                              req, cluster_id=cid, path=path)
-
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
-        self.assertEqual(0, mock_call.call_count)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_operation(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'operation', True)
-        cid = 'aaaa-bbbb-cccc'
-        body = {
-            'dance': {
-                'params': {
-                    'style': 'tango'
-                },
-                'filters': {
-                    'role': 'slave'
-                }
-            }
-        }
-        req = self._post('/clusters/aaaa-bbbb-cccc/ops',
-                         jsonutils.dumps(body), version='1.4')
-        eng_resp = {'action': 'ACTION_ID'}
-        mock_call.return_value = eng_resp
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        resp = self.controller.operation(req, cluster_id=cid, body=body)
-
-        self.assertEqual(eng_resp, resp)
-        mock_call.assert_called_once_with(req.context, 'cluster_op', obj)
-
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_operation_version_mismatch(self, mock_call, mock_enforce):
-        cid = 'aaaa-bbbb-cccc'
-        body = {'dance': {}}
-        req = self._post('/clusters/aaaa-bbbb/ops', jsonutils.dumps(body),
-                         version='1.1')
-
-        ex = self.assertRaises(senlin_exc.MethodVersionNotFound,
-                               self.controller.operation,
-                               req, cluster_id=cid, body=body)
-
-        self.assertEqual(0, mock_call.call_count)
-        self.assertEqual("API version '1.1' is not supported on this method.",
-                         str(ex))
-
-    def test_operation_no_operations(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'operation', True)
-        cid = 'aaaa-bbbb-cccc'
-        body = {}
-        req = self._post('/clusters/aaaa-bbbb-cccc/ops',
-                         jsonutils.dumps(body), version='1.4')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.operation,
-                               req, cluster_id=cid, body=body)
-
-        self.assertEqual("No operation specified", str(ex))
-
-    def test_operation_multi_operations(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'operation', True)
-        cid = 'aaaa-bbbb-cccc'
-        body = {'dance': {}, 'sing': {}}
-        req = self._post('/clusters/aaaa-bbbb-cccc/ops',
-                         jsonutils.dumps(body), version='1.4')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.operation,
-                               req, cluster_id=cid, body=body)
-
-        self.assertEqual("Multiple operations specified", str(ex))
-
-    def test_cluster_operation_err_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'operation', False)
-        body = {'someoperation': {}}
-        req = self._post('/clusters/abc/ops', jsonutils.dumps(body),
-                         version='1.4')
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.operation,
-                                              req, cluster_id='abc', body=body)
-
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_delete(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'delete', True)
-        req = mock.Mock(context=self.context)
-        req.params.get.return_value = 'false'
-        cid = 'aaaa-bbbb-cccc'
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = {'action': 'FAKE_ID'}
-
-        res = self.controller.delete(req, cluster_id=cid)
-
-        result = {'location': '/actions/FAKE_ID'}
-        self.assertEqual(result, res)
-        mock_parse.assert_called_once_with(
-            'ClusterDeleteRequest', req, {'identity': cid, 'force': False})
-
-        mock_call.assert_called_with(req.context, 'cluster_delete', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_delete_failed_request(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'delete', True)
-        cid = 'fake-cluster'
-        req = mock.Mock(context=self.context)
-        req.params.get.return_value = 'false'
-        mock_parse.side_effect = exc.HTTPBadRequest('Boom')
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.delete,
-                               req, cluster_id=cid)
-
-        self.assertEqual("Boom", str(ex))
-        mock_parse.assert_called_once_with(
-            'ClusterDeleteRequest', req, {'identity': cid, 'force': False})
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_delete_failed_engine(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'delete', True)
-        cid = 'aaaa-bbbb-cccc'
-        req = self._delete('/clusters/%s' % cid, params={'force': 'false'})
-        error = senlin_exc.ResourceNotFound(type='cluster', id=cid)
-        mock_call.side_effect = shared.to_remote_error(error)
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.delete,
-                                              req, cluster_id=cid)
-
-        self.assertEqual(404, resp.json['code'])
-        self.assertEqual('ResourceNotFound', resp.json['error']['type'])
-
-    def test_delete_err_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'delete', False)
-        cid = 'aaaa-bbbb-cccc'
-        req = self._delete('/clusters/%s' % cid)
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.delete,
-                                              req, cluster_id=cid)
-
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_delete_force(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'delete', True)
-        req = mock.Mock(context=self.context)
-        req.params.get.return_value = 'true'
-        cid = 'aaaa-bbbb-cccc'
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = {'action': 'FAKE_ID'}
-
-        params = {'cluster_id': cid, 'body': {'force': True}}
-        res = self.controller.delete(req, **params)
-
-        result = {'location': '/actions/FAKE_ID'}
-        self.assertEqual(result, res)
-        mock_parse.assert_called_once_with(
-            'ClusterDeleteRequest', req, {'identity': cid, 'force': True})
-
-        mock_call.assert_called_with(req.context, 'cluster_delete', obj)
diff --git a/senlin/tests/unit/api/openstack/v1/test_events.py b/senlin/tests/unit/api/openstack/v1/test_events.py
deleted file mode 100644
index 19ae0ca13..000000000
--- a/senlin/tests/unit/api/openstack/v1/test_events.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from webob import exc
-
-from senlin.api.common import util
-from senlin.api.middleware import fault
-from senlin.api.openstack.v1 import events
-from senlin.common import exception as senlin_exc
-from senlin.common import policy
-from senlin.rpc import client as rpc_client
-from senlin.tests.unit.api import shared
-from senlin.tests.unit.common import base
-
-
-@mock.patch.object(policy, 'enforce')
-class EventControllerTest(shared.ControllerTest, base.SenlinTestCase):
-    """Tests the API class which acts as the WSGI controller."""
-
-    def setUp(self):
-        super(EventControllerTest, self).setUp()
-        # Create WSGI controller instance
-
-        class DummyConfig(object):
-            bind_port = 8777
-
-        cfgopts = DummyConfig()
-        self.controller = events.EventController(options=cfgopts)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_index(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        req = self._get('/events')
-
-        engine_resp = [
-            {
-                "action": "create",
-                "cluster_id": None,
-                "id": "2d255b9c-8f36-41a2-a137-c0175ccc29c3",
-                "level": "20",
-                "oid": "0df0931b-e251-4f2e-8719-4ebfda3627ba",
-                "oname": "node009",
-                "otype": "NODE",
-                "project": "6e18cc2bdbeb48a5b3cad2dc499f6804",
-                "status": "CREATING",
-                "status_reason": "Initializing",
-                "timestamp": "2015-03-05T08:53:15.000000",
-                "user": "a21ded6060534d99840658a777c2af5a"
-            }
-        ]
-
-        mock_call.return_value = engine_resp
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        result = self.controller.index(req)
-
-        self.assertEqual(engine_resp, result['events'])
-        mock_parse.assert_called_once_with(
-            'EventListRequest', req,
-            {
-                'project_safe': True
-            })
-        mock_call.assert_called_once_with(req.context, 'event_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_index_whitelists_params(self, mock_call, mock_parse,
-                                           mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        marker_uuid = '8216a86c-1bdc-442e-b493-329385d37cbd'
-        params = {
-            'otype': 'NODE',
-            'oname': 'mynode1',
-            'action': 'NODE_CREATE',
-            'level': 'ERROR',
-            'limit': 10,
-            'marker': marker_uuid,
-            'sort': 'timestamp',
-            'global_project': False,
-        }
-
-        req = self._get('/events', params=params)
-
-        mock_call.return_value = []
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        result = self.controller.index(req)
-
-        self.assertEqual([], result['events'])
-        mock_parse.assert_called_once_with(
-            'EventListRequest', req,
-            {
-                'sort': 'timestamp',
-                'project_safe': True,
-                'level': ['ERROR'],
-                'action': ['NODE_CREATE'],
-                'otype': ['NODE'],
-                'limit': '10',
-                'marker': marker_uuid,
-                'oname': ['mynode1']
-            })
-        mock_call.assert_called_once_with(req.context,
-                                          'event_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_index_whitelists_invalid_params(self, mock_call,
-                                                   mock_parse,
-                                                   mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {
-            'balrog': 'you shall not pass!',
-        }
-        req = self._get('/events', params=params)
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.index, req)
-
-        self.assertEqual("Invalid parameter balrog", str(ex))
-        self.assertFalse(mock_parse.called)
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_index_with_bad_schema(self, mock_call, mock_parse,
-                                         mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'level': 'fake'}
-        req = self._get('/events', params=params)
-
-        mock_parse.side_effect = exc.HTTPBadRequest("invalid value")
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.index,
-                               req)
-
-        self.assertEqual("invalid value", str(ex))
-        mock_parse.assert_called_once_with(
-            'EventListRequest', req, mock.ANY)
-        self.assertEqual(0, mock_call.call_count)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_index_limit_not_int(self, mock_call, mock_parse,
-                                       mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'limit': 'not-int'}
-        req = self._get('/event', params=params)
-
-        mock_parse.side_effect = exc.HTTPBadRequest("not int")
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.index, req)
-
-        self.assertEqual("not int", str(ex))
-        mock_parse.assert_called_once_with(
-            'EventListRequest', req, mock.ANY)
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_index_global_project_true(self, mock_call,
-                                             mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'global_project': 'True'}
-        req = self._get('/events', params=params)
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        error = senlin_exc.Forbidden()
-        mock_call.side_effect = shared.to_remote_error(error)
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.index,
-                                              req)
-
-        self.assertEqual(403, resp.json['code'])
-        self.assertEqual('Forbidden', resp.json['error']['type'])
-        mock_parse.assert_called_once_with(
-            "EventListRequest", mock.ANY, {'project_safe': False})
-        mock_call.assert_called_once_with(req.context, 'event_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_events_index_global_project_false(self, mock_call,
-                                               mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'global_project': 'False'}
-        req = self._get('/events', params=params)
-
-        mock_call.return_value = []
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        resp = self.controller.index(req)
-
-        self.assertEqual([], resp['events'])
-        mock_parse.assert_called_once_with(
-            'EventListRequest', req, {'project_safe': True})
-        mock_call.assert_called_once_with(req.context, 'event_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_index_global_project_not_bool(self, mock_call,
-                                                 mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'global_project': 'No'}
-        req = self._get('/events', params=params)
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.index, req)
-
-        self.assertEqual("Invalid value 'No' specified for 'global_project'",
-                         str(ex))
-        self.assertFalse(mock_parse.called)
-        self.assertFalse(mock_call.called)
-
-    def test_index_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', False)
-        req = self._get('/events')
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.index,
-                                              req)
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_get_success(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'get', True)
-        event_id = "2d255b9c-8f36-41a2-a137-c0175ccc29c3"
-        req = self._get('/events/%(event_id)s' % {'event_id': event_id})
-
-        engine_resp = {
-            "action": "create",
-            "cluster_id": None,
-            "id": "2d255b9c-8f36-41a2-a137-c0175ccc29c3",
-            "level": "20",
-            "oid": "0df0931b-e251-4f2e-8719-4ebfda3627ba",
-            "oname": "node009",
-            "otype": "NODE",
-            "project": "6e18cc2bdbeb48a5b3cad2dc499f6804",
-            "status": "CREATING",
-            "status_reason": "Initializing",
-            "timestamp": "2015-03-05T08:53:15.000000",
-            "user": "a21ded6060534d99840658a777c2af5a"
-        }
-
-        mock_call.return_value = engine_resp
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-
-        response = self.controller.get(req, event_id=event_id)
-
-        self.assertEqual(engine_resp, response['event'])
-        mock_parse.assert_called_once_with(
-            'EventGetRequest', req, {'identity': event_id})
-        mock_call.assert_called_once_with(
-            req.context, 'event_get', mock.ANY)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_event_get_not_found(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'get', True)
-        event_id = 'non-existent-event'
-        req = self._get('/events/%(event_id)s' % {'event_id': event_id})
-
-        error = senlin_exc.ResourceNotFound(type='event', id=event_id)
-        mock_call.side_effect = shared.to_remote_error(error)
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.get,
-                                              req, event_id=event_id)
-
-        self.assertEqual(404, resp.json['code'])
-        self.assertEqual('ResourceNotFound', resp.json['error']['type'])
-
-        mock_parse.assert_called_once_with(
-            'EventGetRequest', mock.ANY, {'identity': event_id})
-        mock_call.assert_called_once_with(
-            req.context, 'event_get', mock.ANY)
-
-    def test_event_get_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'get', False)
-        event_id = 'non-existent-event'
-        req = self._get('/events/%(event_id)s' % {'event_id': event_id})
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.get,
-                                              req, event_id=event_id)
-
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
diff --git a/senlin/tests/unit/api/openstack/v1/test_nodes.py b/senlin/tests/unit/api/openstack/v1/test_nodes.py
deleted file mode 100644
index 5ae00e2ed..000000000
--- a/senlin/tests/unit/api/openstack/v1/test_nodes.py
+++ /dev/null
@@ -1,1151 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-from unittest import mock
-
-from webob import exc
-
-from oslo_serialization import jsonutils
-
-from senlin.api.common import util
-from senlin.api.middleware import fault
-from senlin.api.openstack.v1 import nodes
-from senlin.common import exception as senlin_exc
-from senlin.common import policy
-from senlin.rpc import client as rpc_client
-from senlin.tests.unit.api import shared
-from senlin.tests.unit.common import base
-
-
-@mock.patch.object(policy, 'enforce')
-class NodeControllerTest(shared.ControllerTest, base.SenlinTestCase):
-
-    def setUp(self):
-        super(NodeControllerTest, self).setUp()
-
-        class DummyConfig(object):
-            bind_port = 8777
-
-        cfgopts = DummyConfig()
-        self.controller = nodes.NodeController(options=cfgopts)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        req = self._get('/nodes')
-
-        engine_resp = [
-            {
-                u'id': u'aaaa-bbbb-cccc',
-                u'name': u'node-1',
-                u'cluster_id': None,
-                u'physical_id': None,
-                u'profile_id': u'pppp-rrrr-oooo-ffff',
-                u'profile_name': u'my_stack_profile',
-                u'index': 1,
-                u'role': None,
-                u'init_time': u'2015-01-23T13:06:00Z',
-                u'created_time': u'2015-01-23T13:07:22Z',
-                u'updated_time': None,
-                u'status': u'ACTIVE',
-                u'status_reason': u'Node successfully created',
-                u'data': {},
-                u'metadata': {},
-            }
-        ]
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = engine_resp
-
-        result = self.controller.index(req)
-
-        self.assertEqual(engine_resp, result['nodes'])
-        mock_parse.assert_called_once_with(
-            'NodeListRequest', req, {'project_safe': True})
-        mock_call.assert_called_once_with(
-            req.context, 'node_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_without_tainted(self, mock_call, mock_parse,
-                                        mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        req = self._get('/nodes', version='1.12')
-
-        engine_resp = [
-            {
-                u'id': u'aaaa-bbbb-cccc',
-                u'name': u'node-1',
-                u'cluster_id': None,
-                u'physical_id': None,
-                u'profile_id': u'pppp-rrrr-oooo-ffff',
-                u'profile_name': u'my_stack_profile',
-                u'index': 1,
-                u'role': None,
-                u'init_time': u'2015-01-23T13:06:00Z',
-                u'created_time': u'2015-01-23T13:07:22Z',
-                u'updated_time': None,
-                u'status': u'ACTIVE',
-                u'status_reason': u'Node successfully created',
-                u'data': {},
-                u'metadata': {},
-                u'tainted': False,
-            }
-        ]
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = copy.deepcopy(engine_resp)
-
-        result = self.controller.index(req)
-
-        # list call for version 1.12 should have tainted field removed
-        # remove tainted field from expected response
-        engine_resp[0].pop('tainted')
-
-        self.assertEqual(engine_resp, result['nodes'])
-        mock_parse.assert_called_once_with(
-            'NodeListRequest', req, {'project_safe': True})
-        mock_call.assert_called_once_with(
-            req.context, 'node_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_with_tainted(self, mock_call, mock_parse,
-                                     mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        req = self._get('/nodes', version='1.13')
-
-        engine_resp = [
-            {
-                u'id': u'aaaa-bbbb-cccc',
-                u'name': u'node-1',
-                u'cluster_id': None,
-                u'physical_id': None,
-                u'profile_id': u'pppp-rrrr-oooo-ffff',
-                u'profile_name': u'my_stack_profile',
-                u'index': 1,
-                u'role': None,
-                u'init_time': u'2015-01-23T13:06:00Z',
-                u'created_time': u'2015-01-23T13:07:22Z',
-                u'updated_time': None,
-                u'status': u'ACTIVE',
-                u'status_reason': u'Node successfully created',
-                u'data': {},
-                u'metadata': {},
-                u'tainted': False,
-            }
-        ]
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = copy.deepcopy(engine_resp)
-
-        result = self.controller.index(req)
-
-        self.assertEqual(engine_resp, result['nodes'])
-        mock_parse.assert_called_once_with(
-            'NodeListRequest', req, {'project_safe': True})
-        mock_call.assert_called_once_with(
-            req.context, 'node_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_whitelists_params(self, mock_call,
-                                          mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        marker_uuid = '69814221-5013-4cb6-a943-6bfe9837547d'
-        params = {
-            'name': 'node01',
-            'status': 'ACTIVE',
-            'cluster_id': 'id or name of a cluster',
-            'limit': '10',
-            'marker': marker_uuid,
-            'sort': 'name:asc',
-            'global_project': 'True',
-        }
-        req = self._get('/nodes', params=params)
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = []
-
-        result = self.controller.index(req)
-
-        self.assertEqual([], result['nodes'])
-        mock_parse.assert_called_once_with(
-            'NodeListRequest', req,
-            {
-                'status': ['ACTIVE'],
-                'sort': 'name:asc',
-                'name': ['node01'],
-                'limit': '10',
-                'marker': marker_uuid,
-                'cluster_id': 'id or name of a cluster',
-                'project_safe': False
-            })
-        mock_call.assert_called_once_with(
-            req.context, 'node_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_whitelists_invalid_params(self, mock_call,
-                                                  mock_parse,
-                                                  mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {
-            'balrog': 'you shall not pass!'
-        }
-        req = self._get('/nodes', params=params)
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.index, req)
-
-        self.assertEqual("Invalid parameter balrog",
-                         str(ex))
-        self.assertFalse(mock_call.called)
-        self.assertFalse(mock_parse.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_global_project_true(self, mock_call,
-                                            mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'global_project': 'True'}
-        req = self._get('/nodes', params=params)
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        error = senlin_exc.Forbidden()
-        mock_call.side_effect = shared.to_remote_error(error)
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.index,
-                                              req)
-
-        self.assertEqual(403, resp.json['code'])
-        self.assertEqual('Forbidden', resp.json['error']['type'])
-        mock_parse.assert_called_once_with(
-            "NodeListRequest", mock.ANY, {'project_safe': False})
-        mock_call.assert_called_once_with(req.context, 'node_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_global_project_false(self, mock_call,
-                                             mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'global_project': 'False'}
-        req = self._get('/nodes', params=params)
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = []
-
-        result = self.controller.index(req)
-
-        self.assertEqual([], result['nodes'])
-        mock_parse.assert_called_once_with(
-            'NodeListRequest', req, {'project_safe': True})
-        mock_call.assert_called_once_with(
-            req.context, 'node_list', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_global_project_not_bool(self, mock_call,
-                                                mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'global_project': 'No'}
-        req = self._get('/nodes', params=params)
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.index, req)
-
-        self.assertEqual("Invalid value 'No' specified for 'global_project'",
-                         str(ex))
-        self.assertFalse(mock_parse.called)
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_limit_not_int(self, mock_call,
-                                      mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        params = {'limit': 'not-int'}
-        req = self._get('/nodes', params=params)
-
-        mock_parse.side_effect = exc.HTTPBadRequest("bad limit")
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.index, req)
-
-        self.assertEqual("bad limit", str(ex))
-        mock_parse.assert_called_once_with(
-            'NodeListRequest', req,
-            {'limit': 'not-int', 'project_safe': True})
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_index_cluster_not_found(self, mock_call,
-                                          mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', True)
-        cluster_id = 'non-existent'
-        req = self._get('/nodes', {'cluster_id': cluster_id})
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        msg = "Cannot find the given cluster: non-existent"
-        error = senlin_exc.BadRequest(msg=msg)
-        mock_call.side_effect = shared.to_remote_error(error)
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.index, req)
-        self.assertEqual(400, resp.json['code'])
-        self.assertEqual('BadRequest', resp.json['error']['type'])
-        mock_parse.assert_called_once_with(
-            'NodeListRequest', mock.ANY,
-            {
-                'cluster_id': 'non-existent',
-                'project_safe': True
-            })
-        mock_call.assert_called_once_with(
-            req.context, 'node_list', obj)
-
-    def test_node_index_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'index', False)
-        req = self._get('/nodes')
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.index, req)
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_create_success(self, mock_call,
-                                 mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'create', True)
-        body = {
-            'node': {
-                'name': 'test_node',
-                'profile_id': 'xxxx-yyyy',
-                'cluster_id': None,
-                'role': None,
-                'metadata': {},
-            }
-        }
-
-        engine_response = {
-            'id': 'test_node_id',
-            'name': 'test_node',
-            'profile_id': 'xxxx-yyyy',
-            'cluster_id': None,
-            'role': None,
-            'metadata': {},
-            'action': 'fake_action'
-        }
-
-        req = self._post('/nodes', jsonutils.dumps(body))
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = engine_response
-
-        resp = self.controller.create(req, body=body)
-        expected = {
-            'node': engine_response,
-            'location': '/actions/fake_action'
-        }
-        self.assertEqual(expected, resp)
-        mock_parse.assert_called_once_with(
-            'NodeCreateRequest', req, body, 'node')
-        mock_call.assert_called_once_with(
-            req.context, 'node_create', mock.ANY)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_create_with_bad_body(self, mock_call,
-                                       mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'create', True)
-        body = {'foo': 'bar'}
-        req = self._post('/nodes', jsonutils.dumps(body))
-        mock_parse.side_effect = exc.HTTPBadRequest("bad param")
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.create,
-                               req, body=body)
-
-        self.assertEqual("bad param", str(ex))
-        mock_parse.assert_called_once_with(
-            'NodeCreateRequest', req, body, 'node')
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_create_with_missing_profile_id(self, mock_call,
-                                                 mock_parse,
-                                                 mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'create', True)
-        body = {
-            'node': {
-                'name': 'test_node'
-            }
-        }
-
-        req = self._post('/nodes', jsonutils.dumps(body))
-
-        mock_parse.side_effect = exc.HTTPBadRequest("miss profile")
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.create,
-                               req, body=body)
-        self.assertEqual("miss profile", str(ex))
-        mock_parse.assert_called_once_with(
-            'NodeCreateRequest', req, body, 'node')
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_create_with_missing_name(self, mock_call,
-                                           mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'create', True)
-        body = {
-            'node': {
-                'profile_id': 'xxxx-yyyy'
-            }
-        }
-
-        req = self._post('/nodes', jsonutils.dumps(body))
-
-        mock_parse.side_effect = exc.HTTPBadRequest("miss name")
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.create,
-                               req, body=body)
-        self.assertEqual("miss name", str(ex))
-        mock_parse.assert_called_once_with(
-            'NodeCreateRequest', req, body, 'node')
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_create_with_bad_profile(self, mock_call,
-                                          mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'create', True)
-        body = {
-            'node': {
-                'name': 'test_node',
-                'profile_id': 'bad-profile',
-                'cluster_id': None,
-                'role': None,
-                'metadata': {},
-            }
-        }
-        req = self._post('/nodes', jsonutils.dumps(body))
-
-        error = senlin_exc.ResourceNotFound(type='profile', id='bad-profile')
-        mock_call.side_effect = shared.to_remote_error(error)
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.create,
-                                              req, body=body)
-        self.assertEqual(404, resp.json['code'])
-        self.assertEqual('ResourceNotFound', resp.json['error']['type'])
-        mock_parse.assert_called_once_with(
-            'NodeCreateRequest', mock.ANY, body, 'node')
-        mock_call.assert_called_once_with(
-            req.context, 'node_create', mock.ANY)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_create_with_bad_cluster(self, mock_call,
-                                          mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'create', True)
-        body = {
-            'node': {
-                'name': 'test_node',
-                'profile_id': 'xxxx-yyyy-zzzz',
-                'cluster_id': 'non-existent-cluster',
-                'role': None,
-                'metadata': {},
-            }
-        }
-        req = self._post('/nodes', jsonutils.dumps(body))
-
-        error = senlin_exc.ResourceNotFound(type='cluster',
-                                            id='non-existent-cluster')
-        mock_call.side_effect = shared.to_remote_error(error)
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.create,
-                                              req, body=body)
-
-        self.assertEqual(404, resp.json['code'])
-        self.assertEqual('ResourceNotFound', resp.json['error']['type'])
-        mock_parse.assert_called_once_with(
-            'NodeCreateRequest', mock.ANY, body, 'node')
-        mock_call.assert_called_once_with(
-            req.context, 'node_create', mock.ANY)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_adopt(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'adopt', True)
-        body = {
-            'identity': 'PHYSICAL',
-            'type': 'RES-TYPE',
-            'name': 'test_node',
-            'cluster': 'CLUSTER',
-            'role': 'ROLE',
-            'metadata': {'MK': 'MV'},
-            'overrides': {'NKEY': 'NVAL'},
-            'snapshot': True,
-        }
-
-        engine_response = {
-            'id': 'test_node_id',
-            'name': 'test_node',
-            'profile_id': 'xxxx-yyyy',
-            'cluster_id': 'test_cluster_id',
-            'role': 'ROLE',
-            'metadata': {'MK': 'MV'},
-        }
-        req = self._post('/nodes/adopt', jsonutils.dumps(body),
-                         version='1.7')
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = engine_response
-
-        resp = self.controller.adopt(req, body=body)
-
-        self.assertEqual({'node': engine_response}, resp)
-        mock_parse.assert_called_once_with('NodeAdoptRequest', req, body)
-        mock_call.assert_called_once_with(req.context, 'node_adopt', mock.ANY)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_adopt_preview(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'adopt_preview', True)
-        body = {
-            'identity': 'PHYSICAL',
-            'type': 'PROF-TYPE',
-            'overrides': {'NKEY': 'NVAL'},
-            'snapshot': True,
-        }
-
-        engine_response = {
-            'type': 'PROF-TYPE',
-            'properties': {
-                'foo': 'bar'
-            }
-        }
-        req = self._post('/nodes/adopt/preview', jsonutils.dumps(body),
-                         version='1.7')
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = engine_response
-
-        resp = self.controller.adopt_preview(req, body=body)
-
-        self.assertEqual({'node_profile': engine_response}, resp)
-        mock_parse.assert_called_once_with('NodeAdoptPreviewRequest',
-                                           req, body)
-        mock_call.assert_called_once_with(req.context, 'node_adopt_preview',
-                                          mock.ANY)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_get_success(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'get', True)
-        node_id = 'aaaa-bbbb-cccc'
-        req = self._get('/nodes/%(node_id)s' % {'node_id': node_id})
-
-        engine_resp = {
-            u'id': 'aaaa-bbbb-cccc',
-            u'name': 'node-1',
-            u'cluster_id': None,
-            u'physical_id': None,
-            u'profile_id': 'pppp-rrrr-oooo-ffff',
-            u'profile_name': u'my_stack_profile',
-            u'index': 1,
-            u'role': None,
-            u'init_time': u'2015-01-23T13:06:00Z',
-            u'created_time': u'2015-01-23T13:07:22Z',
-            u'updated_time': None,
-            u'status': u'ACTIVE',
-            u'status_reason': u'Node successfully created',
-            u'data': {},
-            u'metadata': {},
-            u'details': {}
-        }
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = engine_resp
-
-        response = self.controller.get(req, node_id=node_id)
-
-        self.assertEqual(engine_resp, response['node'])
-        mock_parse.assert_called_once_with(
-            'NodeGetRequest', req, {'identity': node_id})
-        mock_call.assert_called_once_with(req.context, 'node_get', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_get_show_details_not_bool(self, mock_call,
-                                            mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'get', True)
-        node_id = 'aaaa-bbbb-cccc'
-        params = {'show_details': 'Okay'}
-        req = self._get('/nodes/%(node_id)s' % {'node_id': node_id},
-                        params=params)
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.get,
-                               req, node_id=node_id)
-
-        self.assertEqual("Invalid value 'Okay' specified for 'show_details'",
-                         str(ex))
-        self.assertFalse(mock_parse.called)
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_get_not_found(self, mock_call,
-                                mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'get', True)
-        node_id = 'non-existent-node'
-        req = self._get('/nodes/%(node_id)s' % {'node_id': node_id})
-
-        error = senlin_exc.ResourceNotFound(type='node', id=node_id)
-        mock_call.side_effect = shared.to_remote_error(error)
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.get,
-                                              req, node_id=node_id)
-
-        self.assertEqual(404, resp.json['code'])
-        self.assertEqual('ResourceNotFound', resp.json['error']['type'])
-        mock_parse.assert_called_once_with(
-            'NodeGetRequest', mock.ANY, {'identity': node_id})
-        mock_call.assert_called_once_with(
-            req.context, 'node_get', mock.ANY)
-
-    def test_node_get_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'get', False)
-        node_id = 'non-existent-node'
-        req = self._get('/nodes/%(node_id)s' % {'node_id': node_id})
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.get,
-                                              req, node_id=node_id)
-
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_update_success(self, mock_call,
-                                 mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'update', True)
-        nid = 'aaaa-bbbb-cccc'
-        body = {
-            'node': {
-                'name': 'test_node',
-                'profile_id': 'xxxx-yyyy',
-                'role': None,
-                'metadata': {},
-            }
-        }
-        aid = 'xxxx-yyyy-zzzz'
-
-        engine_response = body['node'].copy()
-        engine_response['action'] = aid
-
-        req = self._patch('/nodes/%(node_id)s' % {'node_id': nid},
-                          jsonutils.dumps(body))
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = engine_response
-
-        res = self.controller.update(req, node_id=nid, body=body)
-
-        mock_parse.assert_called_once_with(
-            'NodeUpdateRequest', req,
-            {
-                'name': 'test_node',
-                'profile_id': 'xxxx-yyyy',
-                'role': None,
-                'metadata': {},
-                'identity': nid
-            })
-        mock_call.assert_called_once_with(req.context, 'node_update', obj)
-        result = {
-            'node': engine_response,
-            'location': '/actions/%s' % aid,
-        }
-        self.assertEqual(result, res)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_update_malformed_request(self, mock_call,
-                                           mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'update', True)
-        nid = 'aaaa-bbbb-cccc'
-        body = {'name': 'new name'}
-
-        req = self._patch('/nodes/%(node_id)s' % {'node_id': nid},
-                          jsonutils.dumps(body))
-
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.update, req,
-                               node_id=nid, body=body)
-        self.assertEqual("Malformed request data, missing 'node' key "
-                         "in request body.", str(ex))
-        self.assertFalse(mock_parse.called)
-        self.assertFalse(mock_call.called)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_update_not_found(self, mock_call,
-                                   mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'update', True)
-        nid = 'non-exist-node'
-        body = {
-            'node': {
-                'name': 'test_node',
-                'profile_id': 'xxxx-yyyy',
-                'role': None,
-                'metadata': {},
-            }
-        }
-
-        req = self._patch('/nodes/%(node_id)s' % {'node_id': nid},
-                          jsonutils.dumps(body))
-
-        error = senlin_exc.ResourceNotFound(type='node', id=nid)
-        mock_call.side_effect = shared.to_remote_error(error)
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.update,
-                                              req, node_id=nid, body=body)
-
-        mock_call.assert_called_with(req.context, 'node_update', mock.ANY)
-
-        self.assertEqual(404, resp.json['code'])
-        self.assertEqual('ResourceNotFound', resp.json['error']['type'])
-        msg = "The node 'non-exist-node' could not be found."
-        self.assertEqual(msg, resp.json['error']['message'])
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_update_invalid_profile(self, mock_call,
-                                         mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'update', True)
-        nid = 'aaaa-bbbb-cccc'
-        body = {
-            'node': {
-                'name': 'test_node',
-                'profile_id': 'profile-not-exist',
-                'role': None,
-                'metadata': {},
-            }
-        }
-
-        req = self._patch('/nodes/%(node_id)s' % {'node_id': nid},
-                          jsonutils.dumps(body))
-
-        error = senlin_exc.ResourceNotFound(type='profile', id=nid)
-        mock_call.side_effect = shared.to_remote_error(error)
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.update,
-                                              req, node_id=nid, body=body)
-        mock_parse.assert_called_once_with(
-            'NodeUpdateRequest', mock.ANY, mock.ANY)
-        mock_call.assert_called_once_with(
-            req.context, 'node_update', mock.ANY)
-        self.assertEqual(404, resp.json['code'])
-        self.assertEqual('ResourceNotFound', resp.json['error']['type'])
-        msg = "The profile 'aaaa-bbbb-cccc' could not be found."
-        self.assertEqual(msg, resp.json['error']['message'])
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_update_cluster_id_specified(self, mock_call,
-                                              mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'update', True)
-        nid = 'aaaa-bbbb-cccc'
-        body = {'node': {'cluster_id': 'xxxx-yyyy-zzzz'}}
-
-        req = self._patch('/nodes/%(node_id)s' % {'node_id': nid},
-                          jsonutils.dumps(body))
-
-        mock_parse.side_effect = exc.HTTPBadRequest("miss cluster")
-        ex = self.assertRaises(exc.HTTPBadRequest,
-                               self.controller.update,
-                               req, node_id=nid, body=body)
-
-        self.assertEqual("miss cluster", str(ex))
-        mock_parse.assert_called_once_with(
-            'NodeUpdateRequest', req, mock.ANY)
-        self.assertFalse(mock_call.called)
-
-    def test_node_update_denied_policy(self, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'update', False)
-        node_id = 'test-node-1'
-        body = {
-            'node': {
-                'name': 'test_node',
-                'profile_id': 'xxxx-yyyy',
-                'role': None,
-                'metadata': {},
-            }
-        }
-        req = self._patch('/nodes/%(node_id)s' % {'node_id': node_id},
-                          jsonutils.dumps(body))
-
-        resp = shared.request_with_middleware(fault.FaultWrapper,
-                                              self.controller.update,
-                                              req, node_id=node_id, body=body)
-
-        self.assertEqual(403, resp.status_int)
-        self.assertIn('403 Forbidden', str(resp))
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_delete_success(self, mock_call, mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'delete', True)
-        nid = 'aaaa-bbbb-cccc'
-        req = self._delete('/nodes/%(node_id)s' % {'node_id': nid},
-                           params={'force': False})
-
-        obj = mock.Mock()
-        mock_parse.return_value = obj
-        mock_call.return_value = {'action': 'FAKE_ID'}
-
-        res = self.controller.delete(req, node_id=nid)
-
-        result = {'location': '/actions/FAKE_ID'}
-        self.assertEqual(res, result)
-        mock_parse.assert_called_once_with(
-            'NodeDeleteRequest', req,
-            {'identity': 'aaaa-bbbb-cccc', 'force': False})
-        mock_call.assert_called_once_with(req.context, 'node_delete', obj)
-
-    @mock.patch.object(util, 'parse_request')
-    @mock.patch.object(rpc_client.EngineClient, 'call')
-    def test_node_delete_err_malformed_node_id(self, mock_call,
-                                               mock_parse, mock_enforce):
-        self._mock_enforce_setup(mock_enforce, 'delete', True)
-        nid = {'k1': 'v1'}
-        req = self._delete('/nodes/%(node_id)s' % {'node_id': nid},
params={'force': False}) - - mock_parse.side_effect = exc.HTTPBadRequest("bad node") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.delete, req, - node_id=nid) - self.assertEqual("bad node", str(ex)) - self.assertFalse(mock_call.called) - mock_parse.assert_called_once_with( - 'NodeDeleteRequest', req, {'identity': nid, 'force': False}) - - def test_node_delete_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', False) - nid = 'aaaa-bbbb-cccc' - req = self._delete('/nodes/%(node_id)s' % {'node_id': nid}, - params={'force': False}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, node_id=nid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_delete_not_found(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - nid = 'aaaa-bbbb-cccc' - req = self._delete('/nodes/%(node_id)s' % {'node_id': nid}, - params={'force': False}) - - error = senlin_exc.ResourceNotFound(type='node', id=nid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, node_id=nid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_delete_force(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - nid = 'aaaa-bbbb-cccc' - req = self._delete('/nodes/%(node_id)s' % {'node_id': nid}, - params={'force': 'true'}) - - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = {'action': 'FAKE_ID'} - params = {'node_id': nid, 'body': {'force': True}} - res = self.controller.delete(req, **params) - - result = {'location': '/actions/FAKE_ID'} - self.assertEqual(res, result) - mock_parse.assert_called_once_with( - 'NodeDeleteRequest', req, - {'identity': 'aaaa-bbbb-cccc', 'force': True}) - mock_call.assert_called_once_with(req.context, 'node_delete', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_action_check_success(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'action', True) - node_id = 'test-node-1' - body = {'check': {}} - req = self._post('/nodes/%(node_id)s/actions' % {'node_id': node_id}, - jsonutils.dumps(body)) - - engine_response = {'action': 'action-id'} - - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = engine_response - - response = self.controller.action(req, node_id=node_id, body=body) - - location = {'location': '/actions/action-id'} - engine_response.update(location) - self.assertEqual(engine_response, response) - mock_parse.assert_called_once_with( - 'NodeCheckRequest', req, - {'params': {}, 'identity': 'test-node-1'}) - mock_call.assert_called_once_with(req.context, 'node_check', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_action_check_node_not_found(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'action', True) - node_id = 'unknown-node' - body = {'check': {}} - req = self._post('/nodes/%(node_id)s/actions' % {'node_id': 
node_id}, - jsonutils.dumps(body)) - - error = senlin_exc.ResourceNotFound(type='node', id=node_id) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.action, - req, node_id=node_id, body=body) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_action_recover_success(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'action', True) - node_id = 'xxxx-yyyy' - body = {'recover': {}} - req = self._post('/nodes/%(node_id)s/actions' % {'node_id': node_id}, - jsonutils.dumps(body)) - - engine_response = {'action': 'action-id'} - - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = engine_response - - response = self.controller.action(req, node_id=node_id, body=body) - - location = {'location': '/actions/action-id'} - engine_response.update(location) - self.assertEqual(engine_response, response) - mock_parse.assert_called_once_with( - 'NodeRecoverRequest', req, - {'params': {}, 'identity': 'xxxx-yyyy'}) - mock_call.assert_called_once_with(req.context, 'node_recover', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_action_recover_node_not_found(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'action', True) - node_id = 'xxxx-yyyy' - body = {'recover': {}} - req = self._post('/nodes/%(node_id)s/actions' % {'node_id': node_id}, - jsonutils.dumps(body)) - - error = senlin_exc.ResourceNotFound(type='node', id=node_id) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.action, - req, node_id=node_id, body=body) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_action_invalid_params(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'action', True) - node_id = 'unknown-node' - body = {'check': 'foo'} - req = self._post('/nodes/%(node_id)s/actions' % {'node_id': node_id}, - jsonutils.dumps(body)) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.action, - req, node_id=node_id, body=body) - self.assertEqual("The params provided is not a map.", - str(ex)) - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_action_missing_action(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'action', True) - node_id = 'xxxx-yyyy' - body = {} - req = self._post('/nodes/%(node_id)s/actions' % {'node_id': node_id}, - jsonutils.dumps(body)) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.action, req, - node_id=node_id, body=body) - - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - self.assertEqual(400, ex.code) - self.assertIn('No action specified.', str(ex)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_action_multiple_action(self, mock_call, - mock_parse, mock_enforce): - 
self._mock_enforce_setup(mock_enforce, 'action', True) - node_id = 'xxxx-yyyy' - body = {'eat': {}, 'sleep': {}} - req = self._post('/nodes/%(node_id)s/actions' % {'node_id': node_id}, - jsonutils.dumps(body)) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.action, - req, node_id=node_id, body=body) - - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - self.assertEqual(400, ex.code) - self.assertIn('Multiple actions specified.', str(ex)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_action_unknown_action(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'action', True) - node_id = 'xxxx-yyyy' - body = {'eat': None} - req = self._post('/nodes/%(node_id)s/action' % {'node_id': node_id}, - jsonutils.dumps(body)) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.action, - req, node_id=node_id, body=body) - - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - self.assertEqual(400, ex.code) - self.assertIn("Unrecognized action 'eat' specified", - str(ex)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_node_operation(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'operation', True) - node_id = 'xxxx-yyyy' - body = {'dance': {'style': 'rumba'}} - req = self._post('/nodes/%(node_id)s/ops' % {'node_id': node_id}, - jsonutils.dumps(body), version='1.4') - - engine_response = {'action': 'action-id'} - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = engine_response - - response = self.controller.operation(req, node_id=node_id, - body=body) - - expected_response = {'location': '/actions/action-id', - 'action': 'action-id'} - - self.assertEqual(response, expected_response) - mock_parse.assert_called_once_with( - 'NodeOperationRequest', req, - {'identity': 'xxxx-yyyy', - 'operation': 'dance', - 'params': {'style': 'rumba'} - }) - mock_call.assert_called_once_with(req.context, 'node_op', obj) - - def test_node_operation_version_mismatch(self, mock_enforce): - node_id = 'xxxx-yyyy' - body = {} - req = self._post('/nodes/%(node_id)s/ops' % {'node_id': node_id}, - jsonutils.dumps(body), version='1.3') - - ex = self.assertRaises(senlin_exc.MethodVersionNotFound, - self.controller.operation, - req, node_id=node_id, body=body) - - self.assertEqual("API version '1.3' is not supported on this " - "method.", str(ex)) - - def test_node_operation_missing_operation(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'operation', True) - node_id = 'xxxx-yyyy' - body = {} - req = self._post('/nodes/%(node_id)s/ops' % {'node_id': node_id}, - jsonutils.dumps(body), version='1.4') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.operation, req, - node_id=node_id, body=body) - - self.assertEqual(400, ex.code) - self.assertIn('No operation specified.', str(ex)) - - def test_node_operation_multiple_operation(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'operation', True) - node_id = 'xxxx-yyyy' - body = {'eat': {}, 'sleep': {}} - req = self._post('/nodes/%(node_id)s/ops' % {'node_id': node_id}, - jsonutils.dumps(body), version='1.4') - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.operation, - req, node_id=node_id, body=body) - - self.assertEqual(400, ex.code) - self.assertIn('Multiple operations specified.', str(ex)) diff --git 
a/senlin/tests/unit/api/openstack/v1/test_policies.py b/senlin/tests/unit/api/openstack/v1/test_policies.py deleted file mode 100644 index ce216510e..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_policies.py +++ /dev/null @@ -1,686 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from webob import exc - -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from senlin.api.common import util -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import policies -from senlin.common import exception as senlin_exc -from senlin.common import policy -from senlin.objects.requests import policies as vorp -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class PolicyControllerTest(shared.ControllerTest, base.SenlinTestCase): - def setUp(self): - super(PolicyControllerTest, self).setUp() - - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = policies.PolicyController(options=cfgopts) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_index_normal(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/policies') - - engine_resp = [ - { - u'id': u'aaaa-bbbb-cccc', - u'name': u'policy-1', - u'type': u'test_policy_type', - u'spec': { - u'param_1': u'value1', - u'param_2': u'value2', - }, - u'created_time': u'2015-02-24T19:17:22Z', - u'updated_time': None, - } - ] - - mock_call.return_value = engine_resp - obj = vorp.PolicyListRequest() - mock_parse.return_value = obj - - result = self.controller.index(req) - - expected = {u'policies': engine_resp} - self.assertEqual(expected, result) - - mock_parse.assert_called_once_with('PolicyListRequest', req, - {'project_safe': True}) - mock_call.assert_called_once_with(req.context, 'policy_list', - mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_index_whitelists_params(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - fake_id = uuidutils.generate_uuid() - params = { - 'name': 'FAKE', - 'type': 'TYPE', - 'limit': 20, - 'marker': fake_id, - 'sort': 'name:asc', - 'global_project': True, - } - req = self._get('/policies', params=params) - obj = vorp.PolicyListRequest() - mock_parse.return_value = obj - - engine_resp = [{'foo': 'bar'}] - mock_call.return_value = engine_resp - - result = self.controller.index(req) - - expected = {u'policies': engine_resp} - self.assertEqual(expected, result) - - mock_parse.assert_called_once_with( - 'PolicyListRequest', req, - { - 'name': ['FAKE'], - 'type': ['TYPE'], - 'limit': '20', - 'marker': fake_id, - 'sort': 'name:asc', - 'project_safe': False, - }) - mock_call.assert_called_once_with(req.context, 'policy_list', - 
mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_index_whitelist_bad_params(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = { - 'balrog': 'fake_value' - } - req = self._get('/policies', params=params) - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - self.assertEqual("Invalid parameter balrog", str(ex)) - self.assertEqual(0, mock_call.call_count) - self.assertEqual(0, mock_parse.call_count) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_index_invalid_param(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = { - 'limit': '10', - } - req = self._get('/policies', params=params) - err = "Invalid value 'No' specified for 'global_project'" - mock_parse.side_effect = exc.HTTPBadRequest(err) - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, - req) - - self.assertEqual(err, str(ex)) - self.assertEqual(0, mock_call.call_count) - mock_parse.assert_called_once_with( - 'PolicyListRequest', req, {'limit': '10', - 'project_safe': True}) - - def test_policy_index_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', False) - req = self._get('/policies') - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_create_success(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = { - 'policy': { - 'name': 'test_policy', - 'spec': { - 'type': 'policy_type', - 'version': '1.0', - 'properties': { - 'param_1': 'value1', - 'param_2': 2, - } - }, - } - } - - engine_response = { - 'id': 'xxxx-yyyy-zzzz', - 'name': 'test_policy', - 'type': 'test_policy_type-1.0', - 'spec': { - 'type': 'policy_type', - 'version': '1.0', - 'properties': { - 'param_1': 'value1', - 'param_2': 2, - }, - }, - } - - req = self._post('/policies', jsonutils.dumps(body)) - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - resp = self.controller.create(req, body=body) - self.assertEqual(engine_response, resp['policy']) - mock_parse.assert_called_once_with( - 'PolicyCreateRequest', req, body, 'policy') - mock_call.assert_called_with(req.context, 'policy_create', - mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_create_no_policy(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = {'not_policy': 'test_policy'} - - req = self._post('/policies', jsonutils.dumps(body)) - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("bad param", str(ex)) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_create_bad_policy(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = {'policy': {'name': 'fake_name'}} - - req = self._post('/policies', 
jsonutils.dumps(body)) - mock_parse.side_effect = exc.HTTPBadRequest("bad spec") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("bad spec", str(ex)) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_create_with_spec_validation_failed(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = { - 'policy': { - 'name': 'test_policy', - 'spec': { - 'type': 'policy_type', - 'version': '1.0', - 'properties': {'param': 'value'} - }, - } - } - req = self._post('/policies', jsonutils.dumps(body)) - obj = mock.Mock() - mock_parse.return_value = obj - err = senlin_exc.InvalidSpec(message="bad spec") - mock_call.side_effect = shared.to_remote_error(err) - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.create, - req, body=body) - - self.assertEqual(400, resp.json['code']) - self.assertEqual('InvalidSpec', resp.json['error']['type']) - mock_parse.assert_called_once_with( - 'PolicyCreateRequest', mock.ANY, body, 'policy') - mock_call.assert_called_once_with(req.context, 'policy_create', - obj.policy) - - def test_policy_create_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', False) - body = { - 'policy': { - 'name': 'test_policy', - 'spec': { - 'type': 'policy_type', - 'version': '1.0', - 'properties': {'param': 'value'}, - } - } - } - - req = self._post('/policies', jsonutils.dumps(body)) - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.create, - req, body=body) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_get_normal(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - pid = 'pid' - req = self._get('/policies/%s' % pid) - - engine_resp = {'foo': 'bar'} - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.get(req, policy_id=pid) - - self.assertEqual(engine_resp, result['policy']) - mock_parse.assert_called_once_with( - 'PolicyGetRequest', req, {'identity': pid}) - mock_call.assert_called_with(req.context, 'policy_get', - mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_get_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - pid = 'non-existent-policy' - req = self._get('/policies/%s' % pid) - - error = senlin_exc.ResourceNotFound(type='policy', id=pid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, policy_id=pid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - mock_parse.assert_called_once_with( - "PolicyGetRequest", mock.ANY, {'identity': pid}) - - def test_policy_get_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', False) - pid = 'non-existent-policy' - req = self._get('/policies/%s' % pid) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, policy_id=pid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - 
@mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_update_normal(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'aaaa-bbbb-cccc' - body = { - 'policy': { - 'name': 'policy-2', - } - } - - req = self._put('/policies/%(policy_id)s' % {'policy_id': pid}, - jsonutils.dumps(body)) - - engine_resp = { - u'id': pid, - u'name': u'policy-2', - u'type': u'test_policy_type', - u'spec': { - u'param_1': u'value1', - u'param_2': u'value3', - }, - u'created_time': u'2015-02-25T16:20:13Z', - u'updated_time': None, - } - - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.update(req, policy_id=pid, body=body) - - expected = {'policy': engine_resp} - self.assertEqual(expected, result) - - mock_parse.assert_called_once_with( - 'PolicyUpdateRequest', req, {'identity': pid, - 'policy': mock.ANY}) - mock_call.assert_called_with(req.context, 'policy_update', - mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_update_with_no_name(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'aaaa-bbbb-cccc' - body = {'policy': {}} - - req = self._put('/policies/%(pid)s' % {'pid': pid}, - jsonutils.dumps(body)) - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, policy_id=pid, body=body) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'PolicyUpdateRequest', req, {'identity': pid, - 'policy': mock.ANY}) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_update_with_bad_body(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'aaaa-bbbb-cccc' - body = {'foo': 'bar'} - req = self._patch('/policies/%(pid)s' % {'pid': pid}, - jsonutils.dumps(body)) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, policy_id=pid, body=body) - - self.assertEqual("Malformed request data, missing 'policy' key in " - "request body.", str(ex)) - self.assertFalse(mock_call.called) - self.assertFalse(mock_parse.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_update_with_unsupported_field(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'aaaa-bbbb-cccc' - body = { - 'policy': { - 'name': 'new_name_policy', - 'bogus': 'foo' - } - } - - req = self._put('/policies/%(pid)s' % {'pid': pid}, - jsonutils.dumps(body)) - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, policy_id=pid, body=body) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'PolicyUpdateRequest', req, {'identity': pid, - 'policy': mock.ANY}) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_update_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'non-existent-policy' - body = { - 'policy': { - 'name': 'new_policy', - } - } - req = 
self._patch('/policies/%(policy_id)s' % {'policy_id': pid}, - jsonutils.dumps(body)) - - obj = mock.Mock() - mock_parse.return_value = obj - error = senlin_exc.ResourceNotFound(type='policy', id=pid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.update, - req, policy_id=pid, body=body) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - def test_policy_update_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', False) - pid = 'aaaa-bbbb-cccc' - body = { - 'policy': {'name': 'test_policy'}, - } - req = self._put('/policies/%(policy_id)s' % {'policy_id': pid}, - jsonutils.dumps(body)) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.update, - req, policy_id=pid, body=body) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_delete_success(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - pid = 'FAKE_ID' - req = self._delete('/policies/%s' % pid) - - obj = mock.Mock() - mock_parse.return_value = obj - self.assertRaises(exc.HTTPNoContent, - self.controller.delete, req, policy_id=pid) - - mock_parse.assert_called_once_with( - 'PolicyDeleteRequest', req, {'identity': pid}) - mock_call.assert_called_with( - req.context, 'policy_delete', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_delete_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - pid = 'FAKE_ID' - req = self._delete('/policies/%s' % pid) - - error = senlin_exc.ResourceNotFound(type='policy', id=pid) - mock_call.side_effect = shared.to_remote_error(error) - obj = mock.Mock() - mock_parse.return_value = obj - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, policy_id=pid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - def test_policy_delete_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', False) - pid = 'FAKE_ID' - req = self._delete('/policies/%s' % pid) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, policy_id=pid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_validate_version_mismatch(self, mock_call, mock_enforce): - body = { - 'policy': {} - } - req = self._post('/policies/validate', jsonutils.dumps(body), - version='1.1') - - ex = self.assertRaises(senlin_exc.MethodVersionNotFound, - self.controller.validate, - req, body=body) - - mock_call.assert_not_called() - self.assertEqual("API version '1.1' is not supported on this " - "method.", str(ex)) - - def test_policy_validate_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', False) - - body = { - 'policy': { - 'name': 'test_policy', - 'spec': { - 'type': 'test_policy_type', - 'version': '1.0', - 'properties': { - 'param_1': 'value1', - 'param_2': 2, - }, - }, - 'metadata': {}, - } - } - - req = self._post('/policies/validate', jsonutils.dumps(body), - version='1.2') - - resp = 
shared.request_with_middleware(fault.FaultWrapper, - self.controller.validate, - req, body=body) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_validate_no_body(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - body = {'foo': 'bar'} - req = self._post('/policies/validate', jsonutils.dumps(body), - version='1.2') - mock_parse.side_effect = exc.HTTPBadRequest("miss policy") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.validate, - req, body=body) - self.assertEqual("miss policy", str(ex)) - mock_parse.assert_called_once_with( - 'PolicyValidateRequest', req, body, 'policy') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_validate_no_spec(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - body = { - 'policy': {} - } - req = self._post('/policies/validate', jsonutils.dumps(body), - version='1.2') - - mock_parse.side_effect = exc.HTTPBadRequest("miss policy") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.validate, - req, body=body) - self.assertEqual("miss policy", str(ex)) - mock_parse.assert_called_once_with( - 'PolicyValidateRequest', req, body, 'policy') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_validate_invalid_spec(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - body = { - 'policy': { - 'spec': { - 'type': 'senlin.policy.deletion', - 'version': '1.0' - } - } - } - - req = self._post('/policies/validate', jsonutils.dumps(body), - version='1.2') - - msg = 'Spec validation error' - error = senlin_exc.InvalidSpec(message=msg) - mock_call.side_effect = shared.to_remote_error(error) - obj = mock.Mock() - mock_parse.return_value = obj - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.validate, - req, body=body) - - self.assertEqual(400, resp.json['code']) - self.assertEqual('InvalidSpec', resp.json['error']['type']) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_validate_success(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - spec = { - 'spec': { - 'properties': {'foo': 'bar'}, - 'type': 'senlin.policy.deletion', - 'version': '1.0' - } - } - body = { - 'policy': spec - } - - req = self._post('/policies/validate', jsonutils.dumps(body), - version='1.2') - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = spec - - result = self.controller.validate(req, body=body) - self.assertEqual(body, result) - - mock_parse.assert_called_once_with( - 'PolicyValidateRequest', req, body, 'policy') - mock_call.assert_called_with( - req.context, 'policy_validate', mock.ANY) diff --git a/senlin/tests/unit/api/openstack/v1/test_policy_types.py b/senlin/tests/unit/api/openstack/v1/test_policy_types.py deleted file mode 100644 index 67d105d41..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_policy_types.py +++ /dev/null @@ -1,256 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance 
with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from webob import exc - -from senlin.api.common import util -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import policy_types -from senlin.common import exception as senlin_exc -from senlin.common import policy -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class PolicyTypeControllerTest(shared.ControllerTest, base.SenlinTestCase): - def setUp(self): - super(PolicyTypeControllerTest, self).setUp() - - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = policy_types.PolicyTypeController(options=cfgopts) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_list(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/policy_types') - - engine_response = [ - {'name': 'senlin.policy.p1', 'version': '1.0', 'attr': 'v1'}, - {'name': 'senlin.policy.p2', 'version': '1.0', 'attr': 'v2'} - ] - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.index(req) - - self.assertEqual( - [ - {'name': 'senlin.policy.p1-1.0'}, - {'name': 'senlin.policy.p2-1.0'}, - ], - response['policy_types'] - ) - mock_parse.assert_called_once_with( - 'PolicyTypeListRequest', req, {}) - mock_call.assert_called_once_with( - req.context, 'policy_type_list', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_list_old_version(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/policy_types', version='1.3') - - engine_response = [ - {'name': 'senlin.policy.p1', 'version': '1.0'}, - {'name': 'senlin.policy.p2', 'version': '1.1'} - ] - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.index(req) - - self.assertEqual( - [ - {'name': 'senlin.policy.p1-1.0'}, - {'name': 'senlin.policy.p2-1.1'} - ], - response['policy_types'] - ) - mock_parse.assert_called_once_with( - 'PolicyTypeListRequest', req, {}) - mock_call.assert_called_once_with( - req.context, 'policy_type_list', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_list_new_version(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/policy_types', version='1.5') - - engine_response = [ - {'name': 'senlin.policy.p1', 'version': '1.0', 'a1': 'v1'}, - {'name': 'senlin.policy.p2', 'version': '1.1', 'a2': 'v2'} - ] - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.index(req) - - self.assertEqual(engine_response, response['policy_types']) - mock_parse.assert_called_once_with( - 'PolicyTypeListRequest', req, {}) - 
mock_call.assert_called_once_with( - req.context, 'policy_type_list', mock.ANY) - - def test_list_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', False) - req = self._get('/policy_types') - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_get_old_version(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 'SimplePolicy' - req = self._get('/policy_types/%(type)s' % {'type': type_name}, - version='1.3') - - engine_response = { - 'name': type_name, - 'schema': { - 'Foo': {'type': 'String', 'required': False}, - 'Bar': {'type': 'Integer', 'required': False}, - }, - } - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.get(req, type_name=type_name) - - self.assertEqual(engine_response, response['policy_type']) - mock_parse.assert_called_once_with( - 'PolicyTypeGetRequest', req, {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'policy_type_get', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_get_new_version(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 'SimplePolicy' - req = self._get('/policy_types/%(type)s' % {'type': type_name}, - version='1.5') - - engine_response = { - 'name': type_name, - 'schema': { - 'Foo': {'type': 'String', 'required': False}, - 'Bar': {'type': 'Integer', 'required': False}, - }, - 'support_status': 'faked_status' - } - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.get(req, type_name=type_name) - - self.assertEqual(engine_response, response['policy_type']) - mock_parse.assert_called_once_with( - 'PolicyTypeGetRequest', req, {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'policy_type_get', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_type_get(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 'SimplePolicy' - req = self._get('/policy_types/%(type)s' % {'type': type_name}) - - engine_response = { - 'name': type_name, - 'schema': { - 'Foo': {'type': 'String', 'required': False}, - 'Bar': {'type': 'Integer', 'required': False}, - }, - } - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.get(req, type_name=type_name) - - self.assertEqual(engine_response, response['policy_type']) - mock_parse.assert_called_once_with( - 'PolicyTypeGetRequest', req, {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'policy_type_get', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_type_get_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 'BogusPolicyType' - req = self._get('/policy_types/%(type)s' % {'type': type_name}) - - error = senlin_exc.ResourceNotFound(type='policy_type', id=type_name) - 
mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, type_name=type_name) - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_policy_type_get_bad_param(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 11 - req = self._get('/policy_types/%(type)s' % {'type': type_name}) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.get, - req, type_name=type_name) - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'PolicyTypeGetRequest', req, {'type_name': type_name}) - self.assertEqual(0, mock_call.call_count) - - def test_policy_type_schema_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', False) - type_name = 'FakePolicyType' - req = self._get('/policy_types/%(type)s' % {'type': type_name}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, type_name=type_name) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) diff --git a/senlin/tests/unit/api/openstack/v1/test_profile_types.py b/senlin/tests/unit/api/openstack/v1/test_profile_types.py deleted file mode 100644 index 65458411c..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_profile_types.py +++ /dev/null @@ -1,350 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from webob import exc - -from senlin.api.common import util -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import profile_types -from senlin.common import exception as senlin_exc -from senlin.common import policy -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class ProfileTypeControllerTest(shared.ControllerTest, base.SenlinTestCase): - def setUp(self): - super(ProfileTypeControllerTest, self).setUp() - - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = profile_types.ProfileTypeController(options=cfgopts) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_list(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/profile_types') - - engine_response = [ - {'name': 'os.heat.stack', 'version': '1.0'}, - {'name': 'os.nova.server', 'version': '1.0'} - ] - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.index(req) - - self.assertEqual( - [{'name': 'os.heat.stack-1.0'}, {'name': 'os.nova.server-1.0'}], - response['profile_types']) - mock_parse.assert_called_once_with('ProfileTypeListRequest', req, {}) - mock_call.assert_called_once_with( - req.context, 'profile_type_list', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_list_old_version(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/profile_types', version='1.3') - - engine_response = [ - {'name': 'os.heat.stack', 'version': '1.0', 'attr': 'bar'}, - {'name': 'os.nova.server', 'version': '1.0', 'attr': 'foo'}, - ] - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.index(req) - - self.assertEqual( - [{'name': 'os.heat.stack-1.0'}, {'name': 'os.nova.server-1.0'}], - response['profile_types'] - ) - mock_parse.assert_called_once_with('ProfileTypeListRequest', req, {}) - mock_call.assert_called_once_with( - req.context, 'profile_type_list', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_list_new_version(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/profile_types', version='1.5') - - engine_response = [ - {'name': 'os.heat.stack', 'version': '1.0', 'attr': 'bar'}, - {'name': 'os.nova.server', 'version': '1.0', 'attr': 'foo'}, - ] - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.index(req) - - self.assertEqual(engine_response, response['profile_types']) - mock_parse.assert_called_once_with('ProfileTypeListRequest', req, {}) - mock_call.assert_called_once_with( - req.context, 'profile_type_list', mock.ANY) - - def test_profile_type_list_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', False) - req = self._get('/profile_types') - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - 
@mock.patch.object(rpc_client.EngineClient, 'call') - def test_get_old_version(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 'SimpleProfile' - req = self._get('/profile_types/%(type)s' % {'type': type_name}, - version='1.3') - - engine_response = { - 'name': type_name, - 'schema': { - 'Foo': {'type': 'String', 'required': False}, - 'Bar': {'type': 'Integer', 'required': False}, - } - } - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.get(req, type_name=type_name) - - self.assertEqual(engine_response, response['profile_type']) - mock_parse.assert_called_once_with( - 'ProfileTypeGetRequest', req, {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'profile_type_get', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_get_new_version(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 'SimpleProfile' - req = self._get('/profile_types/%(type)s' % {'type': type_name}, - version='1.5') - - engine_response = { - 'name': type_name, - 'schema': { - 'Foo': {'type': 'String', 'required': False}, - 'Bar': {'type': 'Integer', 'required': False}, - }, - 'support_status': {"1.0": [{"since": "2016.04", - "status": "supported"}]} - } - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.get(req, type_name=type_name) - - self.assertEqual(engine_response, response['profile_type']) - mock_parse.assert_called_once_with('ProfileTypeGetRequest', req, - {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'profile_type_get', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_type_get(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 'SimpleProfile' - req = self._get('/profile_types/%(type)s' % {'type': type_name}) - - engine_response = { - 'name': type_name, - 'schema': { - 'Foo': {'type': 'String', 'required': False}, - 'Bar': {'type': 'Integer', 'required': False}, - }, - } - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.get(req, type_name=type_name) - - self.assertEqual(engine_response, response['profile_type']) - mock_parse.assert_called_once_with( - 'ProfileTypeGetRequest', req, {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'profile_type_get', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_type_get_with_bad_param(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 100 - req = self._get('/profile_types/%(type)s' % {'type': type_name}) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.get, - req, type_name=type_name) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ProfileTypeGetRequest', req, {'type_name': type_name}) - mock_call.assert_not_called() - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_type_get_not_found(self, mock_call, mock_parse, - 
mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - type_name = 'BogusProfileType' - req = self._get('/profile_types/%(type)s' % {'type': type_name}) - - error = senlin_exc.ResourceNotFound(type='profile_type', id=type_name) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, type_name=type_name) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - mock_parse.assert_called_once_with( - 'ProfileTypeGetRequest', mock.ANY, {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'profile_type_get', mock.ANY) - - def test_profile_type_get_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', False) - type_name = 'BogusProfileType' - req = self._get('/profile_types/%(type)s' % {'type': type_name}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, type_name=type_name) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_type_ops(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'ops', True) - type_name = 'SimpleProfile' - req = self._get('/profile_types/%(type)s/ops' % {'type': type_name}, - version='1.4') - - engine_response = { - 'operations': { - 'Foo': {}, 'Bar': {}, - } - } - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - response = self.controller.ops(req, type_name=type_name) - - self.assertEqual(engine_response, response) - mock_parse.assert_called_once_with( - 'ProfileTypeOpListRequest', req, {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'profile_type_ops', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_type_ops_with_bad_param(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'ops', True) - type_name = 100 - req = self._get('/profile_types/%(type)s/ops' % {'type': type_name}, - version='1.4') - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.ops, - req, type_name=type_name) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ProfileTypeOpListRequest', req, {'type_name': type_name}) - mock_call.assert_not_called() - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_type_ops_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'ops', True) - type_name = 'BogusProfileType' - req = self._get('/profile_types/%(type)s/ops' % {'type': type_name}, - version='1.4') - - error = senlin_exc.ResourceNotFound(type='profile_type', id=type_name) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.ops, - req, type_name=type_name) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - mock_parse.assert_called_once_with( - 'ProfileTypeOpListRequest', mock.ANY, {'type_name': type_name}) - mock_call.assert_called_once_with( - req.context, 'profile_type_ops', mock.ANY) - - 
@mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_type_ops_version_mismatch(self, mock_call, mock_enforce): - type_name = 'fake' - req = self._get('/profile_types/%(type)s/ops' % {'type': type_name}, - version='1.1') - - ex = self.assertRaises(senlin_exc.MethodVersionNotFound, - self.controller.ops, - req, type_name=type_name) - - self.assertEqual(0, mock_call.call_count) - self.assertEqual("API version '1.1' is not supported on this method.", - str(ex)) - - def test_profile_type_ops_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'ops', False) - type_name = 'BogusProfileType' - req = self._get('/profile_types/%(type)s/ops' % {'type': type_name}, - version='1.4') - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.ops, - req, type_name=type_name) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) diff --git a/senlin/tests/unit/api/openstack/v1/test_profiles.py b/senlin/tests/unit/api/openstack/v1/test_profiles.py deleted file mode 100644 index d0ad17824..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_profiles.py +++ /dev/null @@ -1,809 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from webob import exc - -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from senlin.api.common import util -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import profiles -from senlin.common import exception as senlin_exc -from senlin.common import policy -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class ProfileControllerTest(shared.ControllerTest, base.SenlinTestCase): - def setUp(self): - super(ProfileControllerTest, self).setUp() - - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = profiles.ProfileController(options=cfgopts) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_index_normal(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/profiles') - - engine_resp = [ - { - u'id': u'aaaa-bbbb-cccc', - u'name': u'profile-1', - u'type': u'test_profile_type', - u'spec': { - u'param_1': u'value1', - u'param_2': u'value2', - }, - u'created_time': u'2015-02-24T19:17:22Z', - u'updated_time': None, - u'metadata': {}, - } - ] - - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - self.assertEqual(engine_resp, result['profiles']) - mock_parse.assert_called_once_with( - 'ProfileListRequest', req, {'project_safe': True}) - mock_call.assert_called_once_with(req.context, 'profile_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def 
test_profile_index_whitelists_params(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - marker_uuid = uuidutils.generate_uuid() - params = { - 'name': 'foo', - 'type': 'fake_type', - 'limit': 20, - 'marker': marker_uuid, - 'sort': 'name:asc', - 'global_project': False - } - req = self._get('/profiles', params=params) - - mock_call.return_value = [] - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - self.assertEqual([], result['profiles']) - mock_parse.assert_called_once_with( - 'ProfileListRequest', req, - { - 'sort': 'name:asc', - 'name': ['foo'], - 'limit': '20', - 'marker': marker_uuid, - 'type': ['fake_type'], - 'project_safe': True - }) - mock_call.assert_called_once_with(req.context, 'profile_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_index_whitelist_bad_params(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = { - 'balrog': 'fake_value' - } - req = self._get('/profiles', params=params) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - self.assertEqual("Invalid parameter balrog", str(ex)) - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_index_global_project_not_bool(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = {'global_project': 'No'} - req = self._get('/profiles', params=params) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("Invalid value 'No' specified for 'global_project'", - str(ex)) - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_index_limit_non_int(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - - params = {'limit': 'abc'} - req = self._get('/profiles', params=params) - - mock_parse.side_effect = exc.HTTPBadRequest("bad limit") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("bad limit", str(ex)) - mock_parse.assert_called_once_with( - 'ProfileListRequest', req, mock.ANY) - self.assertFalse(mock_call.called) - - def test_profile_index_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', False) - req = self._get('/profiles') - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, - req) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_create_success(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = { - 'profile': { - 'name': 'test_profile', - 'spec': { - 'type': 'test_profile_type', - 'version': '1.0', - 'properties': { - 'param_1': 'value1', - 'param_2': 2, - }, - }, - 'metadata': {}, - } - } - - engine_response = { - 'id': 'xxxx-yyyy-zzzz', - 'name': 'test_profile', - 'type': 'test_profile_type', - 'spec': { - 'type': 'test_profile_type', - 'version': '1.0', - 'properties': { - 'param_1': 'value1', - 'param_2': 2, - } - }, - 
'metadata': {}, - } - - req = self._post('/profiles', jsonutils.dumps(body)) - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - resp = self.controller.create(req, body=body) - - self.assertEqual(engine_response, resp['profile']) - mock_parse.assert_called_once_with( - 'ProfileCreateRequest', req, body, 'profile') - mock_call.assert_called_once_with( - req.context, 'profile_create', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_create_with_no_profile(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = {'name': 'test_profile'} - - req = self._post('/profiles', jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("bad body") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("bad body", str(ex)) - mock_parse.assert_called_once_with( - 'ProfileCreateRequest', mock.ANY, body, 'profile') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_create_with_profile_no_spec(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = {'profile': {'name': 'test_profile'}} - - req = self._post('/profiles', jsonutils.dumps(body)) - mock_parse.side_effect = exc.HTTPBadRequest("miss spec") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("miss spec", str(ex)) - mock_parse.assert_called_once_with( - 'ProfileCreateRequest', mock.ANY, body, 'profile') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_create_with_bad_type(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - type_name = 'unknown_type' - body = { - 'profile': { - 'name': 'test_profile', - 'spec': { - 'type': type_name, - 'version': '1.0', - 'properties': {'param': 'value'}, - }, - 'metadata': {}, - } - } - req = self._post('/profiles', jsonutils.dumps(body)) - - obj = mock.Mock() - mock_parse.return_value = obj - error = senlin_exc.ResourceNotFound(type='profile_type', id=type_name) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.create, - req, body=body) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - mock_parse.assert_called_once_with( - 'ProfileCreateRequest', mock.ANY, body, 'profile') - mock_call.assert_called_once_with( - req.context, 'profile_create', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_create_with_spec_validation_failed(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = { - 'profile': { - 'name': 'test_profile', - 'spec': { - 'type': 'test_profile_type', - 'version': '1.0', - 'properties': {'param': 'value'}, - }, - 'metadata': {}, - } - } - req = self._post('/profiles', jsonutils.dumps(body)) - obj = mock.Mock() - mock_parse.return_value = obj - - msg = 'Spec validation error (param): value' - error = senlin_exc.InvalidSpec(message=msg) - mock_call.side_effect = shared.to_remote_error(error) - - resp = 
shared.request_with_middleware(fault.FaultWrapper, - self.controller.create, - req, body=body) - - self.assertEqual(400, resp.json['code']) - self.assertEqual('InvalidSpec', resp.json['error']['type']) - mock_parse.assert_called_once_with( - 'ProfileCreateRequest', mock.ANY, body, 'profile') - mock_call.assert_called_once_with( - req.context, 'profile_create', obj) - - def test_profile_create_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', False) - body = { - 'profile': { - 'name': 'test_profile', - 'spec': { - 'type': 'test_profile_type', - 'version': '1.0', - 'properties': {'param': 'value'}, - } - } - } - - req = self._post('/profiles', jsonutils.dumps(body)) - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.create, - req) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_get_normal(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - pid = 'aaaa-bbbb-cccc' - req = self._get('/profiles/%(profile_id)s' % {'profile_id': pid}) - - engine_resp = { - u'id': u'aaaa-bbbb-cccc', - u'name': u'profile-1', - u'type': u'test_profile_type', - u'spec': { - u'param_1': u'value1', - u'param_2': u'value2', - }, - u'created_time': u'2015-02-24T19:17:22Z', - u'updated_time': None, - u'metadata': {}, - } - - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.get(req, profile_id=pid) - - self.assertEqual(engine_resp, result['profile']) - mock_parse.assert_called_once_with( - 'ProfileGetRequest', req, {'identity': pid}) - mock_call.assert_called_once_with( - req.context, 'profile_get', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_get_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - pid = 'non-existent-profile' - req = self._get('/profiles/%(profile_id)s' % {'profile_id': pid}) - - error = senlin_exc.ResourceNotFound(type='profile', id=pid) - mock_call.side_effect = shared.to_remote_error(error) - obj = mock.Mock() - mock_parse.return_value = obj - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, profile_id=pid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - mock_parse.assert_called_once_with( - 'ProfileGetRequest', mock.ANY, {'identity': pid}) - mock_call.assert_called_once_with( - req.context, 'profile_get', obj) - - def test_profile_get_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', False) - pid = 'non-existent-profile' - req = self._get('/profiles/%(profile_id)s' % {'profile_id': pid}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, profile_id=pid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_update_normal(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'aaaa-bbbb-cccc' - body = { - 'profile': { - 'name': 'profile-2', - 'metadata': { - 'author': 'thomas j', - } - } - } - - req = self._put('/profiles/%(profile_id)s' % {'profile_id': 
pid}, - jsonutils.dumps(body)) - - engine_resp = { - u'id': pid, - u'name': u'profile-2', - u'type': u'test_profile_type', - u'created_time': u'2015-02-25T16:20:13Z', - u'updated_time': None, - u'metadata': {u'author': u'thomas j'}, - } - - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.update(req, profile_id=pid, body=body) - - self.assertEqual(engine_resp, result['profile']) - mock_parse.assert_called_once_with( - 'ProfileUpdateRequest', req, mock.ANY) - mock_call.assert_called_once_with( - req.context, 'profile_update', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_update_no_body(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'aaaa-bbbb-cccc' - body = {'foo': 'bar'} - req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid}, - jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("bad body") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, profile_id=pid, body=body) - - self.assertEqual("Malformed request data, missing 'profile' key " - "in request body.", str(ex)) - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_update_no_name(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'aaaa-bbbb-cccc' - body = { - 'profile': {'metadata': {'author': 'thomas j'}} - } - - req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid}, - jsonutils.dumps(body)) - - mock_call.return_value = {} - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.update(req, profile_id=pid, body=body) - - self.assertEqual({}, result['profile']) - mock_parse.assert_called_once_with( - 'ProfileUpdateRequest', req, mock.ANY) - mock_call.assert_called_once_with( - req.context, 'profile_update', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_update_with_unexpected_field(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'aaaa-bbbb-cccc' - body = { - 'profile': { - 'name': 'new_profile', - 'metadata': {'author': 'john d'}, - 'foo': 'bar' - } - } - req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid}, - jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, profile_id=pid, body=body) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ProfileUpdateRequest', req, mock.ANY) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_update_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - pid = 'non-existent-profile' - body = { - 'profile': { - 'name': 'new_profile', - 'metadata': {'author': 'john d'}, - } - } - req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid}, - jsonutils.dumps(body)) - - error = senlin_exc.ResourceNotFound(type='profile', id=pid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - 
self.controller.update, - req, profile_id=pid, - body=body) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - def test_profile_update_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', False) - pid = 'aaaa-bbbb-cccc' - body = { - 'profile': {'name': 'test_profile', 'spec': {'param5': 'value5'}}, - } - req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid}, - jsonutils.dumps(body)) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.update, - req, profile_id=pid, - body=body) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_delete_success(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - pid = 'aaaa-bbbb-cccc' - req = self._delete('/profiles/%(profile_id)s' % {'profile_id': pid}) - - obj = mock.Mock() - mock_parse.return_value = obj - - self.assertRaises(exc.HTTPNoContent, - self.controller.delete, req, profile_id=pid) - - mock_parse.assert_called_once_with( - 'ProfileDeleteRequest', req, {'identity': pid}) - mock_call.assert_called_once_with(req.context, 'profile_delete', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_delete_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - pid = 'aaaa-bbbb-cccc' - req = self._delete('/profiles/%(profile_id)s' % {'profile_id': pid}) - - error = senlin_exc.ResourceNotFound(type='profile', id=pid) - mock_call.side_effect = shared.to_remote_error(error) - obj = mock.Mock() - mock_parse.return_value = obj - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, profile_id=pid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - mock_parse.assert_called_once_with( - 'ProfileDeleteRequest', mock.ANY, {'identity': pid}) - mock_call.assert_called_once_with( - req.context, 'profile_delete', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_delete_resource_in_use(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - pid = 'aaaa-bbbb-cccc' - req = self._delete('/profiles/%(profile_id)s' % {'profile_id': pid}) - - error = senlin_exc.ResourceInUse(type='profile', id=pid, - reason='still in use') - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, profile_id=pid) - - self.assertEqual(409, resp.json['code']) - self.assertEqual('ResourceInUse', resp.json['error']['type']) - - def test_profile_delete_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', False) - pid = 'aaaa-bbbb-cccc' - req = self._delete('/profiles/%(profile_id)s' % {'profile_id': pid}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, profile_id=pid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_validate_version_mismatch(self, mock_call, mock_parse, - 
mock_enforce): - body = { - 'profile': {} - } - req = self._post('/profiles/validate', jsonutils.dumps(body), - version='1.1') - - ex = self.assertRaises(senlin_exc.MethodVersionNotFound, - self.controller.validate, - req, body=body) - - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - self.assertEqual("API version '1.1' is not supported on this " - "method.", str(ex)) - - def test_profile_validate_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', False) - - body = { - 'profile': { - 'name': 'test_profile', - 'spec': { - 'type': 'test_profile_type', - 'version': '1.0', - 'properties': { - 'param_1': 'value1', - 'param_2': 2, - }, - }, - 'metadata': {}, - } - } - - req = self._post('/profiles/validate', jsonutils.dumps(body), - version='1.2') - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.validate, - req, body=body) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_validate_no_body(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - body = {'foo': 'bar'} - req = self._post('/profiles/validate', jsonutils.dumps(body), - version='1.2') - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.validate, - req, body=body) - self.assertEqual("bad param", str(ex)) - - mock_parse.assert_called_once_with( - 'ProfileValidateRequest', req, body, 'profile') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_validate_no_spec(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - body = { - 'profile': {} - } - req = self._post('/profiles/validate', jsonutils.dumps(body), - version='1.2') - - mock_parse.side_effect = exc.HTTPBadRequest("miss spec") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.validate, - req, body=body) - self.assertEqual("miss spec", str(ex)) - mock_parse.assert_called_once_with( - 'ProfileValidateRequest', req, body, 'profile') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_validate_unsupported_field(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - body = { - 'profile': { - 'spec': {'type': 'os.nova.server', - 'version': '1.0'}, - 'foo': 'bar' - } - } - req = self._post('/profiles/validate', jsonutils.dumps(body), - version='1.2') - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.validate, - req, body=body) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ProfileValidateRequest', req, body, 'profile') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_validate_invalid_spec(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - body = { - 'profile': { - 'spec': { - 'type': 'os.nova.server', - 'version': '1.0' - } - } - } - - req = self._post('/profiles/validate', jsonutils.dumps(body), - version='1.2') - - msg = 'Spec 
validation error' - error = senlin_exc.InvalidSpec(message=msg) - mock_call.side_effect = shared.to_remote_error(error) - obj = mock.Mock() - mock_parse.return_value = obj - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.validate, - req, body=body) - - self.assertEqual(400, resp.json['code']) - self.assertEqual('InvalidSpec', resp.json['error']['type']) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_profile_validate_success(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'validate', True) - spec = { - 'spec': { - 'type': 'os.heat.stack', - 'version': '1.0' - } - } - body = { - 'profile': spec - } - - req = self._post('/profiles/validate', jsonutils.dumps(body), - version='1.2') - - obj = mock.Mock() - mock_parse.return_value = obj - mock_call.return_value = spec - - result = self.controller.validate(req, body=body) - - self.assertEqual(spec, result['profile']) - mock_parse.assert_called_once_with( - 'ProfileValidateRequest', req, body, 'profile') - mock_call.assert_called_with( - req.context, 'profile_validate', obj) diff --git a/senlin/tests/unit/api/openstack/v1/test_receivers.py b/senlin/tests/unit/api/openstack/v1/test_receivers.py deleted file mode 100644 index b196b1a62..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_receivers.py +++ /dev/null @@ -1,744 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from webob import exc - -from oslo_serialization import jsonutils - -from senlin.api.common import util -from senlin.api.middleware import fault -from senlin.api.openstack.v1 import receivers -from senlin.common import exception as senlin_exc -from senlin.common import policy -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class ReceiverControllerTest(shared.ControllerTest, base.SenlinTestCase): - def setUp(self): - super(ReceiverControllerTest, self).setUp() - - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = receivers.ReceiverController(options=cfgopts) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_index_normal(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/receivers') - - engine_resp = [ - { - u'id': u'aaaa-bbbb-cccc', - u'name': u'test-receiver', - u'type': u'webhook', - u'user': u'admin', - u'project': u'123456abcd3555', - u'domain': u'default', - u'cluster_id': u'FAKE_CLUSTER', - u'action': u'test-action', - u'actor': { - u'user_id': u'test-user-id', - u'password': u'test-pass', - }, - u'created_time': u'2015-02-24T19:17:22Z', - u'params': {}, - 'channel': { - 'alarm_url': 'http://somewhere/on/earth', - }, - } - ] - - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - self.assertEqual(engine_resp, result['receivers']) - mock_parse.assert_called_once_with( - 'ReceiverListRequest', req, mock.ANY) - mock_call.assert_called_with(req.context, 'receiver_list', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_index_whitelists_params(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - marker = 'cac6d9c1-cb4e-4884-ba2a-3cbc72d84aaf' - params = { - 'limit': 20, - 'marker': marker, - 'sort': 'name:desc', - 'name': 'receiver01', - 'type': 'webhook', - 'cluster_id': '123abc', - 'action': 'CLUSTER_RESIZE', - 'user': 'user123' - } - req = self._get('/receivers', params=params) - - mock_call.return_value = [] - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - self.assertEqual([], result['receivers']) - mock_parse.assert_called_once_with( - 'ReceiverListRequest', req, - { - 'sort': 'name:desc', - 'name': ['receiver01'], - 'action': ['CLUSTER_RESIZE'], - 'limit': '20', - 'marker': marker, - 'cluster_id': ['123abc'], - 'type': ['webhook'], - 'project_safe': True, - 'user': ['user123'] - }) - mock_call.assert_called_with(req.context, 'receiver_list', mock.ANY) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_index_whitelists_invalid_params(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = { - 'balrog': 'you shall not pass!' 
- } - req = self._get('/receivers', params=params) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("Invalid parameter balrog", str(ex)) - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_index_invalid_type(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - params = {'type': 'bogus'} - req = self._get('/receivers', params=params) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverListRequest', req, - { - 'type': ['bogus'], - 'project_safe': True - }) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_index_invalid_action(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - - params = {'action': 'bogus'} - req = self._get('/receivers', params=params) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverListRequest', req, - { - 'action': ['bogus'], - 'project_safe': True - }) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_index_limit_non_int(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - - params = {'limit': 'abc'} - req = self._get('/receivers', params=params) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverListRequest', req, - { - 'limit': 'abc', - 'project_safe': True - }) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_index_invalid_sort(self, mock_call, - mock_parse, mock_enforce): - - self._mock_enforce_setup(mock_enforce, 'index', True) - - params = {'sort': 'bogus:foo'} - req = self._get('/receivers', params=params) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.index, req) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverListRequest', req, - { - 'sort': 'bogus:foo', - 'project_safe': True - }) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_index_global_project(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - - params = {'global_project': True} - req = self._get('/receivers', params=params) - - mock_call.return_value = [] - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.index(req) - - self.assertEqual([], result['receivers']) - mock_parse.assert_called_once_with( - 'ReceiverListRequest', req, {'project_safe': False}) - mock_call.assert_called_once_with( - req.context, 'receiver_list', 
obj) - - def test_receiver_index_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', False) - req = self._get('/receivers') - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.index, req) - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_create_success(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = { - 'receiver': { - 'name': 'test_receiver', - 'type': 'webhook', - 'cluster_id': 'FAKE_ID', - 'action': 'CLUSTER_RESIZE', - 'actor': { - 'user_id': 'test_user_id', - 'password': 'test_pass', - }, - 'params': { - 'test_param': 'test_value' - }, - } - } - - engine_response = { - 'id': 'xxxx-yyyy-zzzz', - 'name': 'test_receiver', - 'type': 'webhook', - 'cluster_id': 'FAKE_ID', - 'action': 'CLUSTER_RESIZE', - 'actor': { - 'user_id': 'test_user_id', - 'password': 'test_pass', - }, - 'params': { - 'test_param': 'test_value' - }, - 'channel': { - 'alarm_url': 'http://somewhere/on/earth', - }, - } - - req = self._post('/receivers', jsonutils.dumps(body)) - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - resp = self.controller.create(req, body=body) - - self.assertEqual(engine_response, resp['receiver']) - mock_parse.assert_called_once_with( - 'ReceiverCreateRequest', req, body, 'receiver') - mock_call.assert_called_with( - req.context, 'receiver_create', obj.receiver) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_create_with_bad_body(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = {'name': 'test_receiver'} - - req = self._post('/receivers', jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverCreateRequest', req, body, 'receiver') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_create_missing_required_field(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - body = { - 'receiver': { - 'name': 'test_receiver', - 'cluster_id': 'FAKE_CLUSTER', - 'action': 'CLUSTER_RESIZE', - } - } - - req = self._post('/receivers', jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("miss type") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("miss type", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverCreateRequest', req, body, 'receiver') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_create_with_bad_type(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - r_type = 'unsupported' - body = { - 'receiver': { - 'name': 'test_receiver', - 'type': r_type, - 'cluster_id': 'FAKE_CLUSTER', - 'action': 'CLUSTER_RESIZE', - } - } - - req = self._post('/receivers', jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("bad type") - ex = 
self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("bad type", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverCreateRequest', req, body, 'receiver') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_create_illegal_action(self, mock_call, - mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'create', True) - action = 'illegal_action' - body = { - 'receiver': { - 'name': 'test_receiver', - 'type': 'webhook', - 'cluster_id': 'FAKE_CLUSTER', - 'action': action, - } - } - req = self._post('/receivers', jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("bad action") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.create, - req, body=body) - - self.assertEqual("bad action", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverCreateRequest', req, body, 'receiver') - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_get_normal(self, mock_call, mock_parse, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - wid = 'aaaa-bbbb-cccc' - req = self._get('/receivers/%(receiver_id)s' % {'receiver_id': wid}) - - engine_resp = { - u'id': u'aaaa-bbbb-cccc', - u'name': u'test-receiver', - u'type': u'webhook', - u'user': u'admin', - u'project': u'123456abcd3555', - u'domain': u'default', - u'cluster_id': u'FAKE_CLUSTER', - u'action': u'test-action', - u'actor': { - u'user_id': u'test-user-id', - u'password': u'test-pass', - }, - u'created_time': u'2015-02-24T19:17:22Z', - u'params': {}, - u'channel': { - u'alarm_url': u'http://somewhere/on/earth', - } - } - - mock_call.return_value = engine_resp - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.get(req, receiver_id=wid) - - self.assertEqual(engine_resp, result['receiver']) - mock_parse.assert_called_once_with( - 'ReceiverGetRequest', req, {'identity': wid}) - mock_call.assert_called_with(req.context, 'receiver_get', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_get_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', True) - wid = 'non-existent-receiver' - req = self._get('/receivers/%(receiver_id)s' % {'receiver_id': wid}) - - error = senlin_exc.ResourceNotFound(type='receiver', id=wid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, receiver_id=wid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - def test_receiver_get_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'get', False) - wid = 'non-existent-receiver' - req = self._get('/receivers/%(receiver_id)s' % {'receiver_id': wid}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.get, - req, receiver_id=wid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_update_normal(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - wid = 'aaaa-bbbb-cccc' - body = { - 'receiver': { - 
'name': 'receiver-2', - 'params': { - 'count': 10, - } - } - } - - req = self._put('/receivers/%(receiver_id)s' % {'receiver_id': wid}, - jsonutils.dumps(body)) - - engine_response = { - u'id': wid, - u'name': u'receiver-2', - u'created_time': u'2015-02-25T16:20:13Z', - u'updated_time': None, - u'params': {u'count': 10}, - } - - mock_call.return_value = engine_response - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.update(req, receiver_id=wid, body=body) - - self.assertEqual(engine_response, result['receiver']) - mock_parse.assert_called_once_with( - 'ReceiverUpdateRequest', req, mock.ANY) - mock_call.assert_called_once_with( - req.context, 'receiver_update', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_update_no_body(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - wid = 'aaaa-bbbb-cccc' - body = {'foo': 'bar'} - req = self._put('/receivers/%(receiver_id)s' % {'receiver_id': wid}, - jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("bad body") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, receiver_id=wid, body=body) - - self.assertEqual("Malformed request data, missing 'receiver' key " - "in request body.", str(ex)) - self.assertFalse(mock_parse.called) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_update_no_name(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - wid = 'aaaa-bbbb-cccc' - body = { - 'receiver': {'params': {'count': 10}} - } - - req = self._put('/receivers/%(receiver_id)s' % {'receiver_id': wid}, - jsonutils.dumps(body)) - mock_call.return_value = {} - obj = mock.Mock() - mock_parse.return_value = obj - - result = self.controller.update(req, receiver_id=wid, body=body) - - self.assertEqual({}, result['receiver']) - mock_parse.assert_called_once_with( - 'ReceiverUpdateRequest', req, mock.ANY) - mock_call.assert_called_once_with( - req.context, 'receiver_update', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_update_with_unexpected_field(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - wid = 'aaaa-bbbb-cccc' - body = { - 'receiver': { - 'name': 'receiver-2', - 'params': {'count': 10}, - } - } - req = self._put('/receivers/%(receiver_id)s' % {'receiver_id': wid}, - jsonutils.dumps(body)) - - mock_parse.side_effect = exc.HTTPBadRequest("bad param") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.update, - req, receiver_id=wid, body=body) - - self.assertEqual("bad param", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverUpdateRequest', req, mock.ANY) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_update_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', True) - wid = 'non-existent-receiver' - body = { - 'receiver': { - 'name': 'receiver-2', - 'params': {'count': 10}, - } - } - req = self._put('/receivers/%(receiver_id)s' % {'receiver_id': wid}, - jsonutils.dumps(body)) - - error = senlin_exc.ResourceNotFound(type='webhook', id=wid) - mock_call.side_effect = 
shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.update, - req, receiver_id=wid, - body=body) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - def test_receiver_update_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'update', False) - wid = 'aaaa-bbbb-cccc' - body = { - 'receiver': {'name': 'receiver-2', 'spec': {'param5': 'value5'}}, - } - req = self._put('/receivers/%(receiver_id)s' % {'receiver_id': wid}, - jsonutils.dumps(body)) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.update, - req, profile_id=wid, - body=body) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_delete_success(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - wid = 'aaaa-bbbb-cccc' - req = self._delete('/receivers/%(receiver_id)s' % {'receiver_id': wid}) - - obj = mock.Mock() - mock_parse.return_value = obj - - self.assertRaises(exc.HTTPNoContent, - self.controller.delete, req, receiver_id=wid) - mock_parse.assert_called_once_with( - 'ReceiverDeleteRequest', req, {'identity': wid}) - mock_call.assert_called_once_with( - req.context, 'receiver_delete', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_delete_err_malformed_receiver_id(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - wid = {'k1': 'v1'} - req = self._delete('/receivers/%(receiver_id)s' % {'receiver_id': wid}) - - mock_parse.side_effect = exc.HTTPBadRequest("bad identity") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.delete, req, - receiver_id=wid) - self.assertEqual("bad identity", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverDeleteRequest', req, {'identity': wid}) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_delete_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', True) - wid = 'aaaa-bbbb-cccc' - req = self._delete('/receivers/%(receiver_id)s' % {'receiver_id': wid}) - - error = senlin_exc.ResourceNotFound(type='receiver', id=wid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, receiver_id=wid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) - - def test_receiver_delete_err_denied_policy(self, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'delete', False) - wid = 'aaaa-bbbb-cccc' - req = self._delete('/receivers/%(receiver_id)s' % {'receiver_id': wid}) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.delete, - req, receiver_id=wid) - - self.assertEqual(403, resp.status_int) - self.assertIn('403 Forbidden', str(resp)) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_notify_success(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'notify') - wid = 'aaaa-bbbb-cccc' - req = self._post('/receivers/%(receiver_id)s/notify' % { 
- 'receiver_id': wid}, None) - - obj = mock.Mock() - mock_parse.return_value = obj - - self.assertRaises(exc.HTTPNoContent, - self.controller.notify, req, receiver_id=wid) - mock_parse.assert_called_once_with( - 'ReceiverNotifyRequest', req, {'identity': wid}) - mock_call.assert_called_with(req.context, 'receiver_notify', obj) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_notify_err_malformed_receiver_id(self, mock_call, - mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'notify', True) - wid = {'k1': 'v1'} - req = self._post('/receivers/%(receiver_id)s' % {'receiver_id': wid}, - None) - - mock_parse.side_effect = exc.HTTPBadRequest("bad identity") - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.notify, req, - receiver_id=wid) - self.assertEqual("bad identity", str(ex)) - mock_parse.assert_called_once_with( - 'ReceiverNotifyRequest', req, {'identity': wid}) - self.assertFalse(mock_call.called) - - @mock.patch.object(util, 'parse_request') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_receiver_notify_not_found(self, mock_call, mock_parse, - mock_enforce): - self._mock_enforce_setup(mock_enforce, 'notify', True) - wid = 'aaaa-bbbb-cccc' - req = self._post('/receivers/%(receiver_id)s/notify' % { - 'receiver_id': wid}, None) - - error = senlin_exc.ResourceNotFound(type='receiver', id=wid) - mock_call.side_effect = shared.to_remote_error(error) - - resp = shared.request_with_middleware(fault.FaultWrapper, - self.controller.notify, - req, receiver_id=wid) - - self.assertEqual(404, resp.json['code']) - self.assertEqual('ResourceNotFound', resp.json['error']['type']) diff --git a/senlin/tests/unit/api/openstack/v1/test_router.py b/senlin/tests/unit/api/openstack/v1/test_router.py deleted file mode 100644 index 0ef83c12f..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_router.py +++ /dev/null @@ -1,454 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_utils import reflection - -from senlin.api.openstack.v1 import router as api_v1 -from senlin.tests.unit.common import base - - -class RoutesTest(base.SenlinTestCase): - - def assertRoute(self, mapper, path, method, action, controller, - params=None): - params = params or {} - route = mapper.match(path, {'REQUEST_METHOD': method}) - self.assertIsNotNone(route) - self.assertEqual(action, route['action']) - obj = route['controller'].controller - obj_name = reflection.get_class_name(obj, fully_qualified=False) - self.assertEqual(controller, obj_name) - del(route['action']) - del(route['controller']) - self.assertEqual(params, route) - - def setUp(self): - super(RoutesTest, self).setUp() - self.m = api_v1.API({}).map - - def test_version_handling(self): - self.assertRoute( - self.m, - '/', - 'GET', - 'version', - 'VersionController') - - def test_profile_types_handling(self): - self.assertRoute( - self.m, - '/profile-types', - 'GET', - 'index', - 'ProfileTypeController') - - self.assertRoute( - self.m, - '/profile-types/test_type', - 'GET', - 'get', - 'ProfileTypeController', - { - 'type_name': 'test_type' - }) - - def test_profile_handling(self): - self.assertRoute( - self.m, - '/profiles', - 'GET', - 'index', - 'ProfileController') - - self.assertRoute( - self.m, - '/profiles', - 'POST', - 'create', - 'ProfileController', - { - 'success': '201', - }) - - self.assertRoute( - self.m, - '/profiles/bbbb', - 'GET', - 'get', - 'ProfileController', - { - 'profile_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/profiles/bbbb', - 'PATCH', - 'update', - 'ProfileController', - { - 'profile_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/profiles/bbbb', - 'DELETE', - 'delete', - 'ProfileController', - { - 'profile_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/profiles/validate', - 'POST', - 'validate', - 'ProfileController') - - def test_policy_types_handling(self): - self.assertRoute( - self.m, - '/policy-types', - 'GET', - 'index', - 'PolicyTypeController') - - self.assertRoute( - self.m, - '/policy-types/test_type', - 'GET', - 'get', - 'PolicyTypeController', - { - 'type_name': 'test_type' - }) - - def test_policy_handling(self): - self.assertRoute( - self.m, - '/policies', - 'GET', - 'index', - 'PolicyController') - - self.assertRoute( - self.m, - '/policies', - 'POST', - 'create', - 'PolicyController', - { - 'success': '201', - }) - - self.assertRoute( - self.m, - '/policies/bbbb', - 'GET', - 'get', - 'PolicyController', - { - 'policy_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/policies/bbbb', - 'PATCH', - 'update', - 'PolicyController', - { - 'policy_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/policies/bbbb', - 'DELETE', - 'delete', - 'PolicyController', - { - 'policy_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/policies/validate', - 'POST', - 'validate', - 'PolicyController') - - def test_cluster_collection(self): - self.assertRoute( - self.m, - '/clusters', - 'GET', - 'index', - 'ClusterController') - - self.assertRoute( - self.m, - '/clusters', - 'POST', - 'create', - 'ClusterController', - { - 'success': '202', - }) - - self.assertRoute( - self.m, - '/clusters/bbbb', - 'GET', - 'get', - 'ClusterController', - { - 'cluster_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/clusters/bbbb', - 'PATCH', - 'update', - 'ClusterController', - { - 'cluster_id': 'bbbb', - 'success': '202', - }) - - self.assertRoute( - self.m, - '/clusters/bbbb/actions', - 'POST', - 'action', - 'ClusterController', - { - 'cluster_id': 'bbbb', - 'success': '202', - 
}) - - self.assertRoute( - self.m, - '/clusters/bbbb', - 'DELETE', - 'delete', - 'ClusterController', - { - 'cluster_id': 'bbbb', - 'success': '202', - }) - - def test_node_collection(self): - self.assertRoute( - self.m, - '/nodes', - 'GET', - 'index', - 'NodeController') - - self.assertRoute( - self.m, - '/nodes', - 'POST', - 'create', - 'NodeController', - { - 'success': '202' - }) - - self.assertRoute( - self.m, - '/nodes/adopt', - 'POST', - 'adopt', - 'NodeController') - - self.assertRoute( - self.m, - '/nodes/adopt-preview', - 'POST', - 'adopt_preview', - 'NodeController') - - self.assertRoute( - self.m, - '/nodes/bbbb', - 'GET', - 'get', - 'NodeController', - { - 'node_id': 'bbbb', - }) - - self.assertRoute( - self.m, - '/nodes/bbbb', - 'PATCH', - 'update', - 'NodeController', - { - 'node_id': 'bbbb', - 'success': '202', - }) - - self.assertRoute( - self.m, - '/nodes/bbbb/actions', - 'POST', - 'action', - 'NodeController', - { - 'node_id': 'bbbb', - 'success': '202', - }) - - self.assertRoute( - self.m, - '/nodes/bbbb', - 'DELETE', - 'delete', - 'NodeController', - { - 'node_id': 'bbbb', - 'success': '202', - }) - - def test_cluster_policy(self): - self.assertRoute( - self.m, - '/clusters/bbbb/policies', - 'GET', - 'index', - 'ClusterPolicyController', - { - 'cluster_id': 'bbbb', - }) - - self.assertRoute( - self.m, - '/clusters/bbbb/policies/cccc', - 'GET', - 'get', - 'ClusterPolicyController', - { - 'cluster_id': 'bbbb', - 'policy_id': 'cccc' - }) - - def test_action_collection(self): - self.assertRoute( - self.m, - '/actions', - 'GET', - 'index', - 'ActionController') - - self.assertRoute( - self.m, - '/actions', - 'POST', - 'create', - 'ActionController', - { - 'success': '201', - }) - - self.assertRoute( - self.m, - '/actions/bbbb', - 'GET', - 'get', - 'ActionController', - { - 'action_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/actions/bbbb', - 'PATCH', - 'update', - 'ActionController', - { - 'action_id': 'bbbb' - }) - - def test_receiver_collection(self): - self.assertRoute( - self.m, - '/receivers', - 'GET', - 'index', - 'ReceiverController') - - self.assertRoute( - self.m, - '/receivers', - 'POST', - 'create', - 'ReceiverController', - { - 'success': '201', - }) - - self.assertRoute( - self.m, - '/receivers/bbbb', - 'GET', - 'get', - 'ReceiverController', - { - 'receiver_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/receivers/bbbb', - 'DELETE', - 'delete', - 'ReceiverController', - { - 'receiver_id': 'bbbb' - }) - - self.assertRoute( - self.m, - '/receivers/bbbb/notify', - 'POST', - 'notify', - 'ReceiverController', - { - 'receiver_id': 'bbbb' - }) - - def test_webhook_collection(self): - self.assertRoute( - self.m, - '/webhooks/bbbbb/trigger', - 'POST', - 'trigger', - 'WebhookController', - { - 'webhook_id': 'bbbbb', - 'success': '202', - }) - - def test_build_info(self): - self.assertRoute( - self.m, - '/build-info', - 'GET', - 'build_info', - 'BuildInfoController') diff --git a/senlin/tests/unit/api/openstack/v1/test_services.py b/senlin/tests/unit/api/openstack/v1/test_services.py deleted file mode 100644 index 9c8bc1fda..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_services.py +++ /dev/null @@ -1,70 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import iso8601 -from unittest import mock - -from senlin.api.openstack.v1 import services -from senlin.common import policy -from senlin.objects import service as service_obj -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -fake_services_list = [ - mock.Mock(binary='senlin-engine', - host='host1', - id=1, - disabled=False, - topic='senlin-engine', - updated_at=datetime.datetime(2012, 10, 29, 13, 42, 11, - tzinfo=iso8601.UTC), - created_at=datetime.datetime(2014, 10, 29, 13, 42, 11, - tzinfo=iso8601.UTC), - disabled_reason='') -] - - -@mock.patch.object(policy, 'enforce') -class ServicesControllerTest(shared.ControllerTest, base.SenlinTestCase): - - def setUp(self): - super(ServicesControllerTest, self).setUp() - - # Create WSGI controller instance - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = services.ServiceController(options=cfgopts) - - def tearDown(self): - super(ServicesControllerTest, self).tearDown() - - @mock.patch.object(service_obj.Service, 'get_all') - def test_service_index(self, mock_call, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'index', True) - req = self._get('/services') - req.context.is_admin = True - - mock_call.return_value = fake_services_list - res_dict = self.controller.index(req) - - response = {'services': [{'topic': 'senlin-engine', - 'binary': 'senlin-engine', 'id': 1, - 'host': 'host1', 'status': 'enabled', - 'state': 'down', 'disabled_reason': '', - 'updated_at': datetime.datetime( - 2012, 10, 29, 13, 42, 11)}]} - self.assertEqual(res_dict, response) diff --git a/senlin/tests/unit/api/openstack/v1/test_version.py b/senlin/tests/unit/api/openstack/v1/test_version.py deleted file mode 100644 index 8a2b88df8..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_version.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from senlin.api.common import version_request as vr -from senlin.api.common import wsgi -from senlin.api.openstack.v1 import version -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -class FakeRequest(wsgi.Request): - - @staticmethod - def blank(*args, **kwargs): - kwargs['base_url'] = 'http://localhost/v1' - version = kwargs.pop('version', wsgi.DEFAULT_API_VERSION) - out = wsgi.Request.blank(*args, **kwargs) - out.version_request = vr.APIVersionRequest(version) - return out - - -class VersionControllerTest(shared.ControllerTest, base.SenlinTestCase): - - def setUp(self): - super(VersionControllerTest, self).setUp() - self.controller = version.VersionController({}) - - def test_version(self): - req = self._get('/') - - result = self.controller.version(req) - - response = result['version'] - self.assertEqual('1.0', response['id']) - self.assertEqual('CURRENT', response['status']) - self.assertEqual('2016-01-18T00:00:00Z', response['updated']) - expected = [{ - 'base': 'application/json', - 'type': 'application/vnd.openstack.clustering-v1+json' - }] - self.assertEqual(expected, response['media-types']) - expected = [{ - 'href': 'http://server.test:8004/v1', - 'rel': 'self'}, { - 'href': 'https://docs.openstack.org/api-ref/clustering', - 'rel': 'help', - }] - self.assertEqual(expected, response['links']) - - -class APIVersionTest(base.SenlinTestCase): - - def setUp(self): - super(APIVersionTest, self).setUp() - self.vc = version.VersionController - - def test_min_api_version(self): - res = self.vc.min_api_version() - expected = vr.APIVersionRequest(self.vc._MIN_API_VERSION) - self.assertEqual(expected, res) - - def test_max_api_version(self): - res = self.vc.max_api_version() - expected = vr.APIVersionRequest(self.vc._MAX_API_VERSION) - self.assertEqual(expected, res) - - def test_is_supported(self): - req = mock.Mock() - req.version_request = vr.APIVersionRequest(self.vc._MIN_API_VERSION) - res = self.vc.is_supported(req) - self.assertTrue(res) - - def test_is_supported_min_version(self): - req = FakeRequest.blank('/fake', version='1.1') - - self.assertTrue(self.vc.is_supported(req, '1.0', '1.1')) - self.assertTrue(self.vc.is_supported(req, '1.1', '1.1')) - self.assertFalse(self.vc.is_supported(req, '1.2')) - self.assertFalse(self.vc.is_supported(req, '1.3')) - - def test_is_supported_max_version(self): - req = FakeRequest.blank('/fake', version='2.5') - - self.assertFalse(self.vc.is_supported(req, max_ver='2.4')) - self.assertTrue(self.vc.is_supported(req, max_ver='2.5')) - self.assertTrue(self.vc.is_supported(req, max_ver='2.6')) - - def test_is_supported_min_and_max_version(self): - req = FakeRequest.blank('/fake', version='2.5') - - self.assertFalse(self.vc.is_supported(req, '2.3', '2.4')) - self.assertTrue(self.vc.is_supported(req, '2.3', '2.5')) - self.assertTrue(self.vc.is_supported(req, '2.3', '2.7')) - self.assertTrue(self.vc.is_supported(req, '2.5', '2.7')) - self.assertFalse(self.vc.is_supported(req, '2.6', '2.7')) - self.assertTrue(self.vc.is_supported(req, '2.5', '2.5')) - self.assertFalse(self.vc.is_supported(req, '2.10', '2.1')) diff --git a/senlin/tests/unit/api/openstack/v1/test_webhooks.py b/senlin/tests/unit/api/openstack/v1/test_webhooks.py deleted file mode 100644 index ceeff8028..000000000 --- a/senlin/tests/unit/api/openstack/v1/test_webhooks.py +++ /dev/null @@ -1,173 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with 
the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from webob import exc - -from oslo_serialization import jsonutils - -from senlin.api.openstack.v1 import webhooks -from senlin.common import policy -from senlin.rpc import client as rpc_client -from senlin.tests.unit.api import shared -from senlin.tests.unit.common import base - - -@mock.patch.object(policy, 'enforce') -class WebhookControllerBaseTest(shared.ControllerTest, base.SenlinTestCase): - WEBHOOK_VERSION = '1' - WEBHOOK_API_MICROVERSION = '1.0' - - def setUp(self): - super(WebhookControllerBaseTest, self).setUp() - - class DummyConfig(object): - bind_port = 8777 - - cfgopts = DummyConfig() - self.controller = webhooks.WebhookController(options=cfgopts) - - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_webhook_trigger(self, mock_call, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'trigger', True) - body = None - webhook_id = 'test_webhook_id' - action_id = 'test_action_id' - - engine_response = { - 'action': action_id, - } - - req = self._post('/webhooks/test_webhook_id/trigger', - jsonutils.dumps(body), - version=self.WEBHOOK_API_MICROVERSION, - params={'V': self.WEBHOOK_VERSION}) - mock_call.return_value = engine_response - - resp = self.controller.trigger(req, webhook_id=webhook_id, body=None) - - self.assertEqual(action_id, resp['action']) - self.assertEqual('/actions/test_action_id', resp['location']) - - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_webhook_trigger_with_params(self, mock_call, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'trigger', True) - body = {'params': {'key': 'value'}} - webhook_id = 'test_webhook_id' - - engine_response = {'action': 'FAKE_ACTION'} - - req = self._post('/webhooks/test_webhook_id/trigger', - jsonutils.dumps(body), - version=self.WEBHOOK_API_MICROVERSION, - params={'V': self.WEBHOOK_VERSION}) - mock_call.return_value = engine_response - - resp = self.controller.trigger(req, webhook_id=webhook_id, body=body) - - self.assertEqual('FAKE_ACTION', resp['action']) - self.assertEqual('/actions/FAKE_ACTION', resp['location']) - - -class WebhookV1ControllerInvalidParamsTest(WebhookControllerBaseTest): - WEBHOOK_VERSION = '1' - WEBHOOK_API_MICROVERSION = '1.0' - - @mock.patch.object(policy, 'enforce') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_webhook_trigger_invalid_params(self, mock_call, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'trigger', True) - webhook_id = 'fake' - body = {"bad": "boo"} - req = self._patch('/webhooks/{}/trigger'.format(webhook_id), - jsonutils.dumps(body), - version=self.WEBHOOK_API_MICROVERSION, - params={'V': self.WEBHOOK_VERSION}) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.trigger, - req, webhook_id=webhook_id, body=body) - - self.assertEqual( - "Additional properties are not allowed ('bad' was unexpected)", - str(ex)) - self.assertFalse(mock_call.called) - - @mock.patch.object(policy, 'enforce') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_webhook_trigger_invalid_json(self, mock_call, mock_enforce): - 
self._mock_enforce_setup(mock_enforce, 'trigger', True) - webhook_id = 'fake' - body = {"params": "boo"} - req = self._patch('/webhooks/{}/trigger'.format(webhook_id), - jsonutils.dumps(body), - version=self.WEBHOOK_API_MICROVERSION, - params={'V': self.WEBHOOK_VERSION}) - - ex = self.assertRaises(exc.HTTPBadRequest, - self.controller.trigger, - req, webhook_id=webhook_id, body=body) - self.assertEqual("The value (boo) is not a valid JSON.", - str(ex)) - self.assertFalse(mock_call.called) - - -class WebhookV1ControllerValidParamsTest(WebhookControllerBaseTest): - WEBHOOK_VERSION = '1' - WEBHOOK_API_MICROVERSION = '1.10' - - @mock.patch.object(policy, 'enforce') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_webhook_trigger_extra_params(self, mock_call, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'trigger', True) - webhook_id = 'fake' - body = {"bad": "boo"} - engine_response = {'action': 'FAKE_ACTION'} - mock_call.return_value = engine_response - req = self._patch('/webhooks/{}/trigger'.format(webhook_id), - jsonutils.dumps(body), - version=self.WEBHOOK_API_MICROVERSION, - params={'V': self.WEBHOOK_VERSION}) - - resp = self.controller.trigger(req, webhook_id=webhook_id, body=body) - - self.assertEqual('FAKE_ACTION', resp['action']) - self.assertEqual('/actions/FAKE_ACTION', resp['location']) - - @mock.patch.object(policy, 'enforce') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test_webhook_trigger_non_json_params(self, mock_call, mock_enforce): - self._mock_enforce_setup(mock_enforce, 'trigger', True) - webhook_id = 'fake' - body = {"params": "boo"} - engine_response = {'action': 'FAKE_ACTION'} - mock_call.return_value = engine_response - req = self._patch('/webhooks/{}/trigger'.format(webhook_id), - jsonutils.dumps(body), - version=self.WEBHOOK_API_MICROVERSION, - params={'V': self.WEBHOOK_VERSION}) - - resp = self.controller.trigger(req, webhook_id=webhook_id, body=body) - - self.assertEqual('FAKE_ACTION', resp['action']) - self.assertEqual('/actions/FAKE_ACTION', resp['location']) - - -class WebhookV2ControllerTest(WebhookV1ControllerValidParamsTest): - WEBHOOK_VERSION = '2' - WEBHOOK_API_MICROVERSION = '1.0' - - -class WebhookV2_110_ControllerTest(WebhookV1ControllerValidParamsTest): - WEBHOOK_VERSION = '2' - WEBHOOK_API_MICROVERSION = '1.10' diff --git a/senlin/tests/unit/api/shared.py b/senlin/tests/unit/api/shared.py deleted file mode 100644 index b7266ecc0..000000000 --- a/senlin/tests/unit/api/shared.py +++ /dev/null @@ -1,135 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
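The webhook tests above pin a microversion boundary: before 1.10 a trigger body may carry only a JSON-object 'params' key, while 1.10 and later pass arbitrary bodies through to the engine. A minimal standalone sketch of that gating, mirroring the error strings asserted above (validate_trigger_body is an illustrative helper, not part of the Senlin API):

def validate_trigger_body(body, microversion):
    """Validate a webhook trigger body the way the tests above expect."""
    if body is None:
        return {}
    major, minor = (int(p) for p in microversion.split('.'))
    if (major, minor) >= (1, 10):
        # From microversion 1.10 on, unknown keys are tolerated.
        return body
    unknown = set(body) - {'params'}
    if unknown:
        raise ValueError("Additional properties are not allowed "
                         "('%s' was unexpected)" % sorted(unknown)[0])
    params = body.get('params')
    if params is not None and not isinstance(params, dict):
        raise ValueError("The value (%s) is not a valid JSON." % params)
    return body

assert validate_trigger_body({'params': {'k': 'v'}}, '1.0') == {'params': {'k': 'v'}}
assert validate_trigger_body({'bad': 'boo'}, '1.10') == {'bad': 'boo'}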
- -import webob - -from oslo_config import cfg -from oslo_messaging._drivers import common as rpc_common -from oslo_utils import encodeutils - -from senlin.api.common import version_request as vr -from senlin.api.common import wsgi -from senlin.common import consts -from senlin.tests.unit.common import utils - - -def request_with_middleware(middleware, func, req, *args, **kwargs): - - @webob.dec.wsgify - def _app(req): - return func(req, *args, **kwargs) - - resp = middleware(_app).process_request(req) - return resp - - -def to_remote_error(error): - """Prepend the given exception with the _Remote suffix.""" - - exc_info = (type(error), error, None) - serialized = rpc_common.serialize_remote_exception(exc_info) - remote_error = rpc_common.deserialize_remote_exception( - serialized, ["senlin.common.exception"]) - return remote_error - - -class ControllerTest(object): - """Common utilities for testing API Controllers.""" - - def __init__(self, *args, **kwargs): - super(ControllerTest, self).__init__(*args, **kwargs) - - cfg.CONF.set_default('host', 'server.test') - self.topic = consts.CONDUCTOR_TOPIC - self.api_version = '1.0' - self.project = 'PROJ' - self.mock_enforce = None - - def _environ(self, path): - return { - 'SERVER_NAME': 'server.test', - 'SERVER_PORT': 8004, - 'SCRIPT_NAME': '', - 'PATH_INFO': '/%s' % self.project + path, - 'wsgi.url_scheme': 'http', - } - - def _simple_request(self, path, params=None, method='GET', version=None): - environ = self._environ(path) - environ['REQUEST_METHOD'] = method - - if params: - qs = "&".join(["=".join([k, str(params[k])]) for k in params]) - environ['QUERY_STRING'] = qs - - req = wsgi.Request(environ) - req.context = utils.dummy_context('api_test_user', self.project) - self.context = req.context - ver = version if version else wsgi.DEFAULT_API_VERSION - req.version_request = vr.APIVersionRequest(ver) - return req - - def _get(self, path, params=None, version=None): - return self._simple_request(path, params=params, version=version) - - def _delete(self, path, params=None, version=None): - return self._simple_request(path, params=params, method='DELETE', - version=version) - - def _data_request(self, path, data, content_type='application/json', - method='POST', version=None, params=None): - environ = self._environ(path) - environ['REQUEST_METHOD'] = method - - if params: - qs = "&".join(["=".join([k, str(params[k])]) for k in params]) - environ['QUERY_STRING'] = qs - - req = wsgi.Request(environ) - req.context = utils.dummy_context('api_test_user', self.project) - self.context = req.context - ver = version if version else wsgi.DEFAULT_API_VERSION - req.version_request = vr.APIVersionRequest(ver) - req.body = encodeutils.safe_encode(data) if data else None - return req - - def _post(self, path, data, content_type='application/json', version=None, - params=None): - return self._data_request(path, data, content_type, version=version, - params=params) - - def _put(self, path, data, content_type='application/json', version=None): - return self._data_request(path, data, content_type, method='PUT', - version=version) - - def _patch(self, path, data, params=None, content_type='application/json', - version=None): - return self._data_request(path, data, content_type, method='PATCH', - version=version, params=params) - - def tearDown(self): - # Common tearDown to assert that policy enforcement happens for all - # controller actions - if self.mock_enforce: - rule = "%s:%s" % (self.controller.REQUEST_SCOPE, self.action) - self.mock_enforce.assert_called_with( -
context=self.context, - target={}, rule=rule) - self.assertEqual(self.expected_request_count, - len(self.mock_enforce.call_args_list)) - super(ControllerTest, self).tearDown() - - def _mock_enforce_setup(self, mocker, action, allowed=True, - expected_request_count=1): - self.mock_enforce = mocker - self.action = action - self.mock_enforce.return_value = allowed - self.expected_request_count = expected_request_count diff --git a/senlin/tests/unit/cmd/__init__.py b/senlin/tests/unit/cmd/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/cmd/test_conductor.py b/senlin/tests/unit/cmd/test_conductor.py deleted file mode 100644 index 182833758..000000000 --- a/senlin/tests/unit/cmd/test_conductor.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from unittest import mock - -from oslo_config import cfg - -from senlin.cmd import conductor -from senlin.common import config -from senlin.common import consts -from senlin.common import messaging -from senlin.common import profiler -from senlin.conductor import service -from senlin.tests.unit.common import base - -CONF = cfg.CONF - - -class TestConductor(base.SenlinTestCase): - def setUp(self): - super(TestConductor, self).setUp() - - @mock.patch('oslo_log.log.setup') - @mock.patch('oslo_log.log.set_defaults') - @mock.patch('oslo_service.service.launch') - @mock.patch.object(config, 'parse_args') - @mock.patch.object(messaging, 'setup') - @mock.patch.object(profiler, 'setup') - @mock.patch.object(service, 'ConductorService') - def test_main(self, mock_service, mock_profiler_setup, - mock_messaging_setup, mock_parse_args, mock_launch, - mock_log_set_defaults, mock_log_setup): - conductor.main() - - mock_parse_args.assert_called_once() - mock_log_setup.assert_called_once() - mock_log_set_defaults.assert_called_once() - mock_messaging_setup.assert_called_once() - mock_profiler_setup.assert_called_once() - - mock_service.assert_called_once_with( - mock.ANY, consts.CONDUCTOR_TOPIC - ) - - mock_launch.assert_called_once_with( - mock.ANY, mock.ANY, workers=1, restart_method='mutate' - ) diff --git a/senlin/tests/unit/cmd/test_engine.py b/senlin/tests/unit/cmd/test_engine.py deleted file mode 100644 index 2a4dad072..000000000 --- a/senlin/tests/unit/cmd/test_engine.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
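One reading aid for the cmd service tests above and below: stacked mock.patch decorators are applied bottom-up, so the decorator closest to the test method supplies the first mock argument (mock_service in test_main corresponds to the innermost @mock.patch.object(service, ...)). A self-contained illustration with made-up names:

from unittest import mock


class Svc(object):
    @staticmethod
    def ping():
        return 'real ping'

    @staticmethod
    def pong():
        return 'real pong'


@mock.patch.object(Svc, 'ping')   # outermost decorator -> last mock argument
@mock.patch.object(Svc, 'pong')   # innermost decorator -> first mock argument
def demo(mock_pong, mock_ping):
    Svc.ping()
    Svc.pong()
    mock_ping.assert_called_once_with()
    mock_pong.assert_called_once_with()


demo()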
-from unittest import mock - -from oslo_config import cfg - -from senlin.cmd import engine -from senlin.common import config -from senlin.common import consts -from senlin.common import messaging -from senlin.common import profiler -from senlin.engine import service -from senlin.tests.unit.common import base - -CONF = cfg.CONF - - -class TestEngine(base.SenlinTestCase): - def setUp(self): - super(TestEngine, self).setUp() - - @mock.patch('oslo_log.log.setup') - @mock.patch('oslo_log.log.set_defaults') - @mock.patch('oslo_service.service.launch') - @mock.patch.object(config, 'parse_args') - @mock.patch.object(messaging, 'setup') - @mock.patch.object(profiler, 'setup') - @mock.patch.object(service, 'EngineService') - def test_main(self, mock_service, mock_profiler_setup, - mock_messaging_setup, mock_parse_args, mock_launch, - mock_log_set_defaults, mock_log_setup): - engine.main() - - mock_parse_args.assert_called_once() - mock_log_setup.assert_called_once() - mock_log_set_defaults.assert_called_once() - mock_messaging_setup.assert_called_once() - mock_profiler_setup.assert_called_once() - - mock_service.assert_called_once_with( - mock.ANY, consts.ENGINE_TOPIC - ) - - mock_launch.assert_called_once_with( - mock.ANY, mock.ANY, workers=1, restart_method='mutate' - ) diff --git a/senlin/tests/unit/cmd/test_health_manager.py b/senlin/tests/unit/cmd/test_health_manager.py deleted file mode 100644 index d941d3413..000000000 --- a/senlin/tests/unit/cmd/test_health_manager.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from unittest import mock - -from oslo_config import cfg - -from senlin.cmd import health_manager -from senlin.common import config -from senlin.common import consts -from senlin.common import messaging -from senlin.common import profiler -from senlin.health_manager import service -from senlin.tests.unit.common import base - -CONF = cfg.CONF - - -class TestHealthManager(base.SenlinTestCase): - def setUp(self): - super(TestHealthManager, self).setUp() - - @mock.patch('oslo_log.log.setup') - @mock.patch('oslo_log.log.set_defaults') - @mock.patch('oslo_service.service.launch') - @mock.patch.object(config, 'parse_args') - @mock.patch.object(messaging, 'setup') - @mock.patch.object(profiler, 'setup') - @mock.patch.object(service, 'HealthManagerService') - def test_main(self, mock_service, mock_profiler_setup, - mock_messaging_setup, mock_parse_args, mock_launch, - mock_log_set_defaults, mock_log_setup): - health_manager.main() - - mock_parse_args.assert_called_once() - mock_log_setup.assert_called_once() - mock_log_set_defaults.assert_called_once() - mock_messaging_setup.assert_called_once() - mock_profiler_setup.assert_called_once() - - mock_service.assert_called_once_with( - mock.ANY, consts.HEALTH_MANAGER_TOPIC - ) - - mock_launch.assert_called_once_with( - mock.ANY, mock.ANY, workers=1, restart_method='mutate' - ) diff --git a/senlin/tests/unit/cmd/test_status.py b/senlin/tests/unit/cmd/test_status.py deleted file mode 100644 index 0100e94f1..000000000 --- a/senlin/tests/unit/cmd/test_status.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2018 NEC, Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
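The test_status.py checks that follow exercise oslo.upgradecheck: each check returns a Result wrapping Code.SUCCESS or Code.FAILURE, and the tests assert on result.code. For orientation, a minimal check class in the same style (ExampleChecks and its check body are illustrative, not the Senlin implementation):

from oslo_upgradecheck import upgradecheck


class ExampleChecks(upgradecheck.UpgradeCommands):

    def _check_example(self):
        # A real check would inspect the database for stale resources;
        # this one always passes.
        return upgradecheck.Result(upgradecheck.Code.SUCCESS, 'all good')

    _upgrade_checks = (('example', _check_example),)


assert ExampleChecks()._check_example().code == upgradecheck.Code.SUCCESS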
- -from oslo_upgradecheck.upgradecheck import Code - -from senlin.cmd import status -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestUpgradeChecks(base.SenlinTestCase): - - def setUp(self): - super(TestUpgradeChecks, self).setUp() - self.ctx = utils.dummy_context() - self.cmd = status.Checks() - - self.healthpolv1_0_data = { - 'name': 'test_healthpolicy', - 'type': 'senlin.policy.health-1.0', - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - 'data': None, - } - - self.healthpolv1_1_data = { - 'name': 'test_healthpolicy', - 'type': 'senlin.policy.health-1.1', - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - 'data': None, - } - - self.scalepol_data = { - 'name': 'test_scalepolicy', - 'type': 'senlin.policy.scaling-1.0', - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - 'data': None, - } - - def test__check_healthpolicy_success(self): - healthpolv1_1 = db_api.policy_create(self.ctx, self.healthpolv1_1_data) - self.addCleanup(db_api.policy_delete, self.ctx, healthpolv1_1.id) - - scalepol = db_api.policy_create(self.ctx, self.scalepol_data) - self.addCleanup(db_api.policy_delete, self.ctx, scalepol.id) - - check_result = self.cmd._check_healthpolicy() - self.assertEqual(Code.SUCCESS, check_result.code) - - def test__check_healthpolicy_failed(self): - healthpolv1_0 = db_api.policy_create(self.ctx, self.healthpolv1_0_data) - self.addCleanup(db_api.policy_delete, self.ctx, healthpolv1_0.id) - - scalepol = db_api.policy_create(self.ctx, self.scalepol_data) - self.addCleanup(db_api.policy_delete, self.ctx, scalepol.id) - - check_result = self.cmd._check_healthpolicy() - self.assertEqual(Code.FAILURE, check_result.code) diff --git a/senlin/tests/unit/common/__init__.py b/senlin/tests/unit/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/common/base.py b/senlin/tests/unit/common/base.py deleted file mode 100644 index 43ce4a58d..000000000 --- a/senlin/tests/unit/common/base.py +++ /dev/null @@ -1,191 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import os -import tempfile -import time - -import fixtures -from oslo_config import cfg -from oslo_db import options -from oslo_log import log as logging -from oslo_serialization import jsonutils -import shutil -import testscenarios -import testtools - -from senlin.common import messaging -from senlin.db import api as db_api -from senlin.engine import service - - -TEST_DEFAULT_LOGLEVELS = {'migrate': logging.WARN, - 'sqlalchemy': logging.WARN} -_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s" -_TRUE_VALUES = ('True', 'true', '1', 'yes') - - -class FakeLogMixin(object): - def setup_logging(self): - # Assign default logs to self.LOG so we can still - # assert on senlin logs. 
- default_level = logging.INFO - if os.environ.get('OS_DEBUG') in _TRUE_VALUES: - default_level = logging.DEBUG - - self.LOG = self.useFixture( - fixtures.FakeLogger(level=default_level, format=_LOG_FORMAT)) - base_list = set([nlog.split('.')[0] for nlog in - logging.getLogger().logger.manager.loggerDict]) - for base in base_list: - if base in TEST_DEFAULT_LOGLEVELS: - self.useFixture(fixtures.FakeLogger( - level=TEST_DEFAULT_LOGLEVELS[base], - name=base, format=_LOG_FORMAT)) - elif base != 'senlin': - self.useFixture(fixtures.FakeLogger( - name=base, format=_LOG_FORMAT)) - - -class DatabaseFixture(fixtures.Fixture): - fixture = None - - @staticmethod - def mktemp(): - tmpfs_path = '/dev/shm' - if not os.path.isdir(tmpfs_path): - tmpfs_path = '/tmp' - return tempfile.mkstemp( - prefix='senlin-', suffix='.sqlite', dir=tmpfs_path)[1] - - @staticmethod - def get_fixture(): - if not DatabaseFixture.fixture: - DatabaseFixture.fixture = DatabaseFixture() - return DatabaseFixture.fixture - - def __init__(self): - super(DatabaseFixture, self).__init__() - self.golden_path = self.mktemp() - self.golden_url = 'sqlite:///%s' % self.golden_path - - db_api.db_sync(self.golden_url) - - self.working_path = self.mktemp() - self.working_url = 'sqlite:///%s' % self.working_path - - def setUp(self): - super(DatabaseFixture, self).setUp() - shutil.copy(self.golden_path, self.working_path) - - -class SenlinTestCase(testscenarios.WithScenarios, - testtools.TestCase, FakeLogMixin): - - TIME_STEP = 0.1 - - def setUp(self): - super(SenlinTestCase, self).setUp() - self.setup_logging() - service.ENABLE_SLEEP = False - self.useFixture(fixtures.MonkeyPatch( - 'senlin.common.exception._FATAL_EXCEPTION_FORMAT_ERRORS', - True)) - - def enable_sleep(): - service.ENABLE_SLEEP = True - - self.addCleanup(enable_sleep) - self.addCleanup(cfg.CONF.reset) - - messaging.setup("fake://", optional=True) - self.addCleanup(messaging.cleanup) - - self.db_fixture = self.useFixture(DatabaseFixture.get_fixture()) - - options.cfg.set_defaults( - options.database_opts, sqlite_synchronous=False - ) - options.set_defaults(cfg.CONF, connection=self.db_fixture.working_url) - - def stub_wallclock(self): - # Overrides scheduler wallclock to speed up tests expecting timeouts. - self._wallclock = time.time() - - def fake_wallclock(): - self._wallclock += self.TIME_STEP - return self._wallclock - - self.patchobject(service, 'wallclock', side_effect=fake_wallclock) - - def patchobject(self, obj, attr, **kwargs): - mockfixture = self.useFixture(fixtures.MockPatchObject(obj, attr, - **kwargs)) - return mockfixture.mock - - # NOTE(pshchelo): this overrides the testtools.TestCase.patch method - # that does simple monkey-patching in favor of mock's patching - def patch(self, target, **kwargs): - mockfixture = self.useFixture(fixtures.MockPatch(target, **kwargs)) - return mockfixture.mock - - def assertJsonEqual(self, expected, observed): - """Asserts that 2 complex data structures are json equivalent. - - This code is from Nova.
- """ - if isinstance(expected, str): - expected = jsonutils.loads(expected) - if isinstance(observed, str): - observed = jsonutils.loads(observed) - - def sort_key(x): - if isinstance(x, (set, list)) or isinstance(x, datetime.datetime): - return str(x) - if isinstance(x, dict): - items = ((sort_key(k), sort_key(v)) for k, v in x.items()) - return sorted(items) - return x - - def inner(expected, observed): - if isinstance(expected, dict) and isinstance(observed, dict): - self.assertEqual(len(expected), len(observed)) - expected_keys = sorted(expected) - observed_keys = sorted(observed) - self.assertEqual(expected_keys, observed_keys) - - for key in list(expected.keys()): - inner(expected[key], observed[key]) - elif (isinstance(expected, (list, tuple, set)) and - isinstance(observed, (list, tuple, set))): - self.assertEqual(len(expected), len(observed)) - - expected_values_iter = iter(sorted(expected, key=sort_key)) - observed_values_iter = iter(sorted(observed, key=sort_key)) - - for i in range(len(expected)): - inner(next(expected_values_iter), - next(observed_values_iter)) - else: - self.assertEqual(expected, observed) - - try: - inner(expected, observed) - except testtools.matchers.MismatchError as e: - inner_mismatch = e.mismatch - # inverting the observed / expected because testtools - # error messages assume expected is second. Possibly makes - # reading the error messages less confusing. - raise testtools.matchers.MismatchError( - observed, expected, inner_mismatch, verbose=True) diff --git a/senlin/tests/unit/common/utils.py b/senlin/tests/unit/common/utils.py deleted file mode 100644 index 56b234824..000000000 --- a/senlin/tests/unit/common/utils.py +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import random -import string - -from oslo_utils import timeutils - -from senlin.common import context -from senlin import objects - - -def random_name(): - return ''.join(random.choice(string.ascii_uppercase) - for x in range(10)) - - -def dummy_context(user=None, project=None, password=None, roles=None, - user_id=None, trust_id=None, region_name=None, domain=None, - is_admin=False, api_version=None): - - roles = roles or [] - return context.RequestContext.from_dict({ - 'project_id': project or 'test_project_id', - 'user_id': user_id or 'test_user_id', - 'user_name': user or 'test_username', - 'password': password or 'password', - 'roles': roles or [], - 'is_admin': is_admin, - 'auth_url': 'http://server.test:5000/v2.0', - 'auth_token': 'abcd1234', - 'trust_id': trust_id or 'trust_id', - 'region_name': region_name or 'region_one', - 'domain_id': domain or '', - 'api_version': api_version or '1.2', - }) - - -def create_profile(context, profile_id): - values = { - 'id': profile_id, - 'context': context.to_dict(), - 'type': 'os.nova.server-1.0', - 'name': 'test-profile', - 'spec': { - 'type': 'os.nova.server', - 'version': '1.0', - }, - 'created_at': timeutils.utcnow(True), - 'user': context.user_id, - 'project': context.project_id, - } - return objects.Profile.create(context, values) - - -def create_cluster(context, cluster_id, profile_id, **kwargs): - values = { - 'id': cluster_id, - 'profile_id': profile_id, - 'name': 'test-cluster', - 'next_index': 1, - 'min_size': 1, - 'max_size': 5, - 'desired_capacity': 3, - 'status': 'ACTIVE', - 'init_at': timeutils.utcnow(True), - 'user': context.user_id, - 'project': context.project_id, - } - values.update(kwargs) - return objects.Cluster.create(context, values) - - -def create_node(context, node_id, profile_id, cluster_id, physical_id=None): - values = { - 'id': node_id, - 'name': 'node1', - 'profile_id': profile_id, - 'cluster_id': cluster_id or '', - 'physical_id': physical_id, - 'index': 2, - 'init_at': timeutils.utcnow(True), - 'created_at': timeutils.utcnow(True), - 'role': 'test_node', - 'status': 'ACTIVE', - 'user': context.user_id, - 'project': context.project_id, - } - return objects.Node.create(context, values) - - -def create_policy(context, policy_id, name=None): - values = { - 'id': policy_id, - 'name': name or 'test_policy', - 'type': 'senlin.policy.dummy-1.0', - 'spec': { - 'type': 'senlin.policy.dummy', - 'version': '1.0', - 'properties': { - 'key1': 'value1', - 'key2': 2 - } - }, - 'created_at': timeutils.utcnow(True), - 'user': context.user_id, - 'project': context.project_id, - } - return objects.Policy.create(context, values) diff --git a/senlin/tests/unit/conductor/__init__.py b/senlin/tests/unit/conductor/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/conductor/service/__init__.py b/senlin/tests/unit/conductor/service/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/conductor/service/test_actions.py b/senlin/tests/unit/conductor/service/test_actions.py deleted file mode 100644 index a31c7258c..000000000 --- a/senlin/tests/unit/conductor/service/test_actions.py +++ /dev/null @@ -1,240 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_messaging.rpc import dispatcher as rpc - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.conductor import service -from senlin.engine.actions import base as ab -from senlin.objects import action as ao -from senlin.objects import cluster as co -from senlin.objects.requests import actions as orao -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class ActionTest(base.SenlinTestCase): - def setUp(self): - super(ActionTest, self).setUp() - self.ctx = utils.dummy_context(project='action_test_project') - self.svc = service.ConductorService('host-a', 'topic-a') - - @mock.patch.object(ao.Action, 'get_all') - def test_action_list(self, mock_get): - x_1 = mock.Mock() - x_1.to_dict.return_value = {'k': 'v1'} - x_2 = mock.Mock() - x_2.to_dict.return_value = {'k': 'v2'} - mock_get.return_value = [x_1, x_2] - - req = orao.ActionListRequest() - result = self.svc.action_list(self.ctx, req.obj_to_primitive()) - expected = [{'k': 'v1'}, {'k': 'v2'}] - self.assertEqual(expected, result) - - mock_get.assert_called_once_with(self.ctx, project_safe=True) - - @mock.patch.object(ao.Action, 'get_all') - def test_action_list_with_params(self, mock_get): - x_1 = mock.Mock() - x_1.to_dict.return_value = {'status': 'READY'} - x_2 = mock.Mock() - x_2.to_dict.return_value = {'status': 'SUCCESS'} - mock_get.return_value = [x_1, x_2] - - req = orao.ActionListRequest(status=['READY', 'SUCCEEDED'], - limit=100, - sort='status', - project_safe=True) - result = self.svc.action_list(self.ctx, req.obj_to_primitive()) - expected = [{'status': 'READY'}, {'status': 'SUCCESS'}] - self.assertEqual(expected, result) - - filters = {'status': ['READY', 'SUCCEEDED']} - mock_get.assert_called_once_with(self.ctx, - filters=filters, - limit=100, - sort='status', - project_safe=True - ) - - def test_action_list_with_bad_params(self): - req = orao.ActionListRequest(project_safe=False) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.action_list, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.Forbidden, ex.exc_info[0]) - - @mock.patch.object(ao.Action, 'get_all') - def test_action_list_with_Auth(self, mock_get): - mock_get.return_value = [] - - req = orao.ActionListRequest(project_safe=True) - result = self.svc.action_list(self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_get.assert_called_once_with(self.ctx, project_safe=True) - - self.ctx.is_admin = True - - mock_get.reset_mock() - req = orao.ActionListRequest(project_safe=True) - result = self.svc.action_list(self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_get.assert_called_once_with(self.ctx, project_safe=True) - - mock_get.reset_mock() - req = orao.ActionListRequest(project_safe=False) - result = self.svc.action_list(self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_get.assert_called_once_with(self.ctx, project_safe=False) - - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - def test_action_create(self, mock_find, mock_action): - 
mock_find.return_value = mock.Mock(id='FAKE_CLUSTER') - mock_action.return_value = 'ACTION_ID' - - req = orao.ActionCreateRequestBody(name='a1', cluster_id='C1', - action='CLUSTER_CREATE') - - result = self.svc.action_create(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'FAKE_CLUSTER', 'CLUSTER_CREATE', - name='a1', - cluster_id='FAKE_CLUSTER', - cause=consts.CAUSE_RPC, - status=ab.Action.READY, - inputs={}) - - @mock.patch.object(co.Cluster, 'find') - def test_action_create_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', id='C1') - - req = orao.ActionCreateRequestBody(name='NODE1', - cluster_id='C1') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.action_create, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Cannot find the given cluster: C1.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'C1') - - @mock.patch.object(ao.Action, 'find') - def test_action_get(self, mock_find): - x_obj = mock.Mock() - mock_find.return_value = x_obj - x_obj.to_dict.return_value = {'k': 'v'} - - req = orao.ActionGetRequest(identity='ACTION_ID') - result = self.svc.action_get(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'k': 'v'}, result) - mock_find.assert_called_once_with(self.ctx, 'ACTION_ID') - - @mock.patch.object(ao.Action, 'find') - def test_action_get_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='action', id='Bogus') - req = orao.ActionGetRequest(identity='Bogus') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.action_get, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(ab.Action, 'delete') - @mock.patch.object(ao.Action, 'find') - def test_action_delete(self, mock_find, mock_delete): - x_obj = mock.Mock() - x_obj.id = 'FAKE_ID' - mock_find.return_value = x_obj - mock_delete.return_value = None - - req = orao.ActionDeleteRequest(identity='ACTION_ID') - result = self.svc.action_delete(self.ctx, req.obj_to_primitive()) - self.assertIsNone(result) - mock_find.assert_called_once_with(self.ctx, 'ACTION_ID') - mock_delete.assert_called_once_with(self.ctx, 'FAKE_ID') - - @mock.patch.object(ab.Action, 'delete') - @mock.patch.object(ao.Action, 'find') - def test_action_delete_resource_busy(self, mock_find, mock_delete): - x_obj = mock.Mock() - x_obj.id = 'FAKE_ID' - mock_find.return_value = x_obj - ex = exc.EResourceBusy(type='action', id='FAKE_ID') - mock_delete.side_effect = ex - - req = orao.ActionDeleteRequest(identity='ACTION_ID') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.action_delete, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceInUse, ex.exc_info[0]) - self.assertEqual("The action 'ACTION_ID' cannot be deleted: still " - "in one of WAITING, RUNNING or SUSPENDED state.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'ACTION_ID') - mock_delete.assert_called_once_with(self.ctx, 'FAKE_ID') - - @mock.patch.object(ao.Action, 'find') - def test_action_delete_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='action', id='Bogus') - - req = orao.ActionDeleteRequest(identity='ACTION_ID') - ex = self.assertRaises(rpc.ExpectedException, - 
self.svc.action_delete, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - - @mock.patch.object(ab.Action, 'load') - def test_action_update(self, mock_load): - x_obj = mock.Mock() - x_obj.id = 'FAKE_ID' - x_obj.signal_cancel = mock.Mock() - x_obj.SIG_CANCEL = 'CANCEL' - mock_load.return_value = x_obj - - req = orao.ActionUpdateRequest(identity='ACTION_ID', - status='CANCELLED', force=False) - - result = self.svc.action_update(self.ctx, req.obj_to_primitive()) - self.assertIsNone(result) - - mock_load.assert_called_with(self.ctx, 'ACTION_ID', project_safe=False) - x_obj.signal_cancel.assert_called_once_with() - - @mock.patch.object(ab.Action, 'load') - def test_action_update_unknown_action(self, mock_load): - x_obj = mock.Mock() - x_obj.id = 'FAKE_ID' - x_obj.signal_cancel = mock.Mock() - mock_load.return_value = x_obj - - req = orao.ActionUpdateRequest(identity='ACTION_ID', - status='FOO') - ex = self.assertRaises(rpc.ExpectedException, self.svc.action_update, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - - mock_load.assert_not_called() - x_obj.signal_cancel.assert_not_called() diff --git a/senlin/tests/unit/conductor/service/test_cluster_op.py b/senlin/tests/unit/conductor/service/test_cluster_op.py deleted file mode 100644 index 6d964241b..000000000 --- a/senlin/tests/unit/conductor/service/test_cluster_op.py +++ /dev/null @@ -1,289 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
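A pattern worth noting across these conductor tests: request objects cross the (mocked) RPC boundary as primitives. obj_to_primitive() turns an oslo.versionedobjects request into a plain serializable structure, and the service side rebuilds the object before acting on it. A sketch of the round trip as it worked before this removal (ActionListRequest is the real request class used above; running this requires the pre-retirement senlin tree):

from senlin.objects.requests import actions as orao

req = orao.ActionListRequest(status=['READY'], project_safe=True)
primitive = req.obj_to_primitive()   # plain dict structure, safe to ship over RPC
restored = orao.ActionListRequest.obj_from_primitive(primitive)
assert restored.status == ['READY']
assert restored.project_safe is True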
- -from unittest import mock - -from oslo_messaging.rpc import dispatcher as rpc - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.conductor import service -from senlin.engine.actions import base as am -from senlin.engine import cluster as cm -from senlin.engine import dispatcher -from senlin.objects import cluster as co -from senlin.objects import node as no -from senlin.objects.requests import clusters as orco -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class ClusterOpTest(base.SenlinTestCase): - def setUp(self): - super(ClusterOpTest, self).setUp() - - self.ctx = utils.dummy_context(project='cluster_op_test_project') - self.svc = service.ConductorService('host-a', 'topic-a') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(no.Node, 'ids_by_cluster') - @mock.patch.object(cm.Cluster, 'load') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_op(self, mock_find, mock_cluster, mock_nodes, mock_action, - mock_start): - x_db_cluster = mock.Mock(id='12345678AB') - mock_find.return_value = x_db_cluster - x_schema = mock.Mock() - x_profile = mock.Mock(OPERATIONS={'dance': x_schema}) - x_cluster = mock.Mock(id='12345678AB') - x_cluster.rt = {'profile': x_profile} - mock_cluster.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - params = {'style': 'tango'} - filters = {'role': 'slave'} - mock_nodes.return_value = ['NODE1', 'NODE2'] - req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER', - operation='dance', - params=params, - filters=filters) - - result = self.svc.cluster_op(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster) - x_schema.validate.assert_called_once_with({'style': 'tango'}) - mock_nodes.assert_called_once_with(self.ctx, '12345678AB', - filters={'role': 'slave'}) - mock_action.assert_called_once_with( - self.ctx, '12345678AB', consts.CLUSTER_OPERATION, - name='cluster_dance_12345678', - cluster_id='12345678AB', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={ - 'operation': 'dance', - 'params': {'style': 'tango'}, - 'nodes': ['NODE1', 'NODE2'] - } - ) - mock_start.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_op_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound( - type='cluster', id='Bogus') - req = orco.ClusterOperationRequest(identity='Bogus', operation='dance') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_op, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(cm.Cluster, 'load') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_op_unsupported_operation(self, mock_find, mock_cluster): - x_db_cluster = mock.Mock(id='12345678AB') - mock_find.return_value = x_db_cluster - x_schema = mock.Mock() - x_profile = mock.Mock(OPERATIONS={'dance': x_schema}, type='cow') - x_cluster = mock.Mock() - x_cluster.rt = {'profile': x_profile} - mock_cluster.return_value = x_cluster - req = orco.ClusterOperationRequest(identity='node1', operation='swim') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_op, - 
self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The requested operation 'swim' is not supported " - "by the profile type 'cow'.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'node1') - mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster) - - @mock.patch.object(cm.Cluster, 'load') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_op_bad_parameters(self, mock_find, mock_cluster): - x_db_cluster = mock.Mock(id='12345678AB') - mock_find.return_value = x_db_cluster - x_schema = mock.Mock() - x_schema.validate.side_effect = exc.ESchema(message='Boom') - x_profile = mock.Mock(OPERATIONS={'dance': x_schema}) - x_cluster = mock.Mock() - x_cluster.rt = {'profile': x_profile} - mock_cluster.return_value = x_cluster - req = orco.ClusterOperationRequest(identity='node1', operation='dance', - params={'style': 'tango'}) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_op, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Boom.", str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'node1') - mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster) - x_schema.validate.assert_called_once_with({'style': 'tango'}) - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(no.Node, 'ids_by_cluster') - @mock.patch.object(cm.Cluster, 'load') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_op_no_parameters(self, mock_find, mock_cluster, - mock_nodes, mock_action, mock_start): - x_db_cluster = mock.Mock(id='12345678AB') - mock_find.return_value = x_db_cluster - x_schema = mock.Mock() - x_profile = mock.Mock(OPERATIONS={'dance': x_schema}) - x_cluster = mock.Mock(id='12345678AB') - x_cluster.rt = {'profile': x_profile} - mock_cluster.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - filters = {'role': 'slave'} - mock_nodes.return_value = ['NODE1', 'NODE2'] - req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER', - operation='dance', - filters=filters) - - result = self.svc.cluster_op(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster) - self.assertEqual(0, x_schema.validate.call_count) - mock_nodes.assert_called_once_with(self.ctx, '12345678AB', - filters={'role': 'slave'}) - mock_action.assert_called_once_with( - self.ctx, '12345678AB', consts.CLUSTER_OPERATION, - name='cluster_dance_12345678', - cluster_id='12345678AB', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={ - 'operation': 'dance', - 'params': {}, - 'nodes': ['NODE1', 'NODE2'] - } - ) - mock_start.assert_called_once_with() - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(no.Node, 'ids_by_cluster') - @mock.patch.object(cm.Cluster, 'load') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_op_no_filters(self, mock_find, mock_cluster, - mock_nodes, mock_action, mock_start): - x_db_cluster = mock.Mock(id='12345678AB') - mock_find.return_value = x_db_cluster - x_schema = mock.Mock() - x_profile = mock.Mock(OPERATIONS={'dance': x_schema}) - x_cluster = mock.Mock(id='12345678AB') - x_cluster.rt = {'profile': x_profile} - mock_cluster.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - 
mock_nodes.return_value = ['NODE1', 'NODE2'] - req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER', - operation='dance') - - result = self.svc.cluster_op(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster) - self.assertEqual(0, x_schema.validate.call_count) - mock_nodes.assert_called_once_with(self.ctx, '12345678AB') - mock_action.assert_called_once_with( - self.ctx, '12345678AB', consts.CLUSTER_OPERATION, - name='cluster_dance_12345678', - cluster_id='12345678AB', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={ - 'operation': 'dance', - 'params': {}, - 'nodes': ['NODE1', 'NODE2'] - } - ) - mock_start.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(no.Node, 'ids_by_cluster') - @mock.patch.object(cm.Cluster, 'load') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_op_bad_filters(self, mock_find, mock_cluster, - mock_nodes, mock_action): - x_db_cluster = mock.Mock() - mock_find.return_value = x_db_cluster - x_schema = mock.Mock() - x_profile = mock.Mock(OPERATIONS={'dance': x_schema}) - x_cluster = mock.Mock(id='12345678AB') - x_cluster.rt = {'profile': x_profile} - mock_cluster.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - mock_nodes.return_value = ['NODE1', 'NODE2'] - filters = {'shape': 'round'} - req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER', - operation='dance', - filters=filters) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_op, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Filter key 'shape' is unsupported.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster) - self.assertEqual(0, x_schema.validate.call_count) - self.assertEqual(0, mock_nodes.call_count) - self.assertEqual(0, mock_action.call_count) - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(no.Node, 'ids_by_cluster') - @mock.patch.object(cm.Cluster, 'load') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_op_no_nodes_found(self, mock_find, mock_cluster, - mock_nodes, mock_action): - x_db_cluster = mock.Mock() - mock_find.return_value = x_db_cluster - x_schema = mock.Mock() - x_profile = mock.Mock(OPERATIONS={'dance': x_schema}) - x_cluster = mock.Mock(id='12345678AB') - x_cluster.rt = {'profile': x_profile} - mock_cluster.return_value = x_cluster - mock_nodes.return_value = [] - mock_action.return_value = 'ACTION_ID' - filters = {'role': 'slave'} - req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER', - operation='dance', filters=filters) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_op, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("No node (matching the filter) could be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster) - mock_nodes.assert_called_once_with(self.ctx, '12345678AB', - filters={'role': 'slave'}) - self.assertEqual(0, mock_action.call_count) diff --git a/senlin/tests/unit/conductor/service/test_cluster_policies.py b/senlin/tests/unit/conductor/service/test_cluster_policies.py deleted file mode 100644 index 
a13c7f8a4..000000000 --- a/senlin/tests/unit/conductor/service/test_cluster_policies.py +++ /dev/null @@ -1,412 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_messaging.rpc import dispatcher as rpc - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.conductor import service -from senlin.engine.actions import base as action_mod -from senlin.engine import dispatcher -from senlin.objects import cluster as co -from senlin.objects import cluster_policy as cpo -from senlin.objects import policy as po -from senlin.objects.requests import cluster_policies as orcp -from senlin.objects.requests import clusters as orco -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class ClusterPolicyTest(base.SenlinTestCase): - def setUp(self): - super(ClusterPolicyTest, self).setUp() - self.ctx = utils.dummy_context(project='cluster_policy_test_project') - self.svc = service.ConductorService('host-a', 'topic-a') - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - def test_list2(self, mock_get, mock_find): - x_obj = mock.Mock(id='FAKE_CLUSTER') - mock_find.return_value = x_obj - b1 = mock.Mock() - b1.to_dict.return_value = {'k': 'v1'} - b2 = mock.Mock() - b2.to_dict.return_value = {'k': 'v2'} - mock_get.return_value = [b1, b2] - - req = orcp.ClusterPolicyListRequest(identity='CLUSTER') - result = self.svc.cluster_policy_list( - self.ctx, req.obj_to_primitive()) - self.assertEqual([{'k': 'v1'}, {'k': 'v2'}], result) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_get.assert_called_once_with(self.ctx, 'FAKE_CLUSTER', - filters={}, sort=None) - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - def test_list2_with_param(self, mock_get, mock_find): - x_obj = mock.Mock(id='FAKE_CLUSTER') - mock_find.return_value = x_obj - mock_get.return_value = [] - - params = { - 'identity': 'CLUSTER', - 'policy_name': 'fake_name', - 'policy_type': 'fake_type', - 'enabled': True, - 'sort': 'enabled' - } - - req = orcp.ClusterPolicyListRequest(**params) - - result = self.svc.cluster_policy_list( - self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - - def test_list2_bad_param(self): - params = { - 'identity': 'CLUSTER', - 'sort': 'bad', - } - - ex = self.assertRaises(ValueError, - orcp.ClusterPolicyListRequest, - **params) - self.assertEqual("Unsupported sort key 'bad' for 'sort'.", - str(ex)) - - @mock.patch.object(co.Cluster, 'find') - def test_list2_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orcp.ClusterPolicyListRequest(identity='Bogus') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_list, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not 
be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_get2(self, mock_get, mock_policy, mock_cluster): - mock_cluster.return_value = mock.Mock(id='C1') - mock_policy.return_value = mock.Mock(id='P1') - x_binding = mock.Mock() - x_binding.to_dict.return_value = {'foo': 'bar'} - mock_get.return_value = x_binding - - req = orcp.ClusterPolicyGetRequest(identity='C1', - policy_id='P1') - result = self.svc.cluster_policy_get(self.ctx, - req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar'}, result) - mock_cluster.assert_called_once_with(self.ctx, 'C1') - mock_policy.assert_called_once_with(self.ctx, 'P1') - mock_get.assert_called_once_with(self.ctx, 'C1', 'P1') - - @mock.patch.object(co.Cluster, 'find') - def test_get2_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='cid') - req = orcp.ClusterPolicyGetRequest(identity='cid', - policy_id='pid') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_get, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'cid' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'cid') - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - def test_get2_policy_not_found(self, mock_policy, mock_cluster): - mock_cluster.return_value = mock.Mock(id='cid') - mock_policy.side_effect = exc.ResourceNotFound(type='policy', - id='pid') - req = orcp.ClusterPolicyGetRequest(identity='cid', - policy_id='pid') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_get, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The policy 'pid' could not be found.", - str(ex.exc_info[1])) - mock_cluster.assert_called_once_with(self.ctx, 'cid') - mock_policy.assert_called_once_with(self.ctx, 'pid') - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_get2_binding_not_found(self, mock_get, mock_policy, mock_cluster): - mock_cluster.return_value = mock.Mock(id='cid') - mock_policy.return_value = mock.Mock(id='pid') - err = exc.PolicyBindingNotFound(policy='pid', identity='cid') - mock_get.side_effect = err - - req = orcp.ClusterPolicyGetRequest(identity='cid', - policy_id='pid') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_get, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.PolicyBindingNotFound, ex.exc_info[0]) - self.assertEqual("The policy 'pid' is not found attached to " - "the specified cluster 'cid'.", - str(ex.exc_info[1])) - - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_attach2(self, notify, mock_policy, mock_cluster, mock_action): - mock_cluster.return_value = mock.Mock(id='12345678abcd') - mock_policy.return_value = mock.Mock(id='87654321abcd') - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterAttachPolicyRequest(identity='C1', policy_id='P1', - enabled=True) - - res = self.svc.cluster_policy_attach(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - 
mock_cluster.assert_called_once_with(self.ctx, 'C1') - mock_policy.assert_called_once_with(self.ctx, 'P1') - - mock_action.assert_called_once_with( - self.ctx, '12345678abcd', consts.CLUSTER_ATTACH_POLICY, - name='attach_policy_12345678', - cluster_id='12345678abcd', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY, - inputs={'policy_id': '87654321abcd', 'enabled': True}, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_attach2_cluster_not_found(self, mock_cluster): - mock_cluster.side_effect = exc.ResourceNotFound(type='cluster', - id='BOGUS') - req = orco.ClusterAttachPolicyRequest(identity='BOGUS', - policy_id='POLICY_ID') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_attach, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'BOGUS' could not be found.", - str(ex.exc_info[1])) - mock_cluster.assert_called_once_with(self.ctx, 'BOGUS') - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - def test_attach2_policy_not_found(self, mock_policy, mock_cluster): - mock_cluster.return_value = mock.Mock(id='12345678abcd') - mock_policy.side_effect = exc.ResourceNotFound(type='policy', - id='BOGUS') - req = orco.ClusterAttachPolicyRequest(identity='CLUSTER', - policy_id='BOGUS') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_attach, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The specified policy 'BOGUS' could not be found.", - str(ex.exc_info[1])) - mock_cluster.assert_called_once_with(self.ctx, 'CLUSTER') - mock_policy.assert_called_once_with(self.ctx, 'BOGUS') - - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_detach2(self, notify, mock_policy, mock_cluster, mock_cp, - mock_action): - mock_cluster.return_value = mock.Mock(id='12345678abcd') - mock_policy.return_value = mock.Mock(id='87654321abcd') - mock_action.return_value = 'ACTION_ID' - mock_cp.return_value = mock.Mock() - req = orco.ClusterDetachPolicyRequest(identity='C1', policy_id='P1') - - res = self.svc.cluster_policy_detach(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - mock_cluster.assert_called_once_with(self.ctx, 'C1') - mock_policy.assert_called_once_with(self.ctx, 'P1') - mock_cp.assert_called_once_with(self.ctx, '12345678abcd', - '87654321abcd') - mock_action.assert_called_once_with( - self.ctx, '12345678abcd', consts.CLUSTER_DETACH_POLICY, - name='detach_policy_12345678', - cluster_id='12345678abcd', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY, - inputs={'policy_id': '87654321abcd'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_detach2_cluster_not_found(self, mock_cluster): - mock_cluster.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orco.ClusterDetachPolicyRequest(identity='Bogus', - policy_id='POLICY_ID') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_detach, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_cluster.assert_called_once_with(self.ctx, 'Bogus') - - 
@mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - def test_detach2_policy_not_found(self, mock_policy, mock_cluster): - mock_cluster.return_value = mock.Mock() - mock_policy.side_effect = exc.ResourceNotFound(type='policy', - id='Bogus') - req = orco.ClusterDetachPolicyRequest(identity='CLUSTER', - policy_id='Bogus') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_detach, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The specified policy 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_cluster.assert_called_once_with(self.ctx, 'CLUSTER') - mock_policy.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - def test_detach2_binding_not_found(self, mock_policy, mock_cluster, - mock_cp): - mock_cluster.return_value = mock.Mock(id='X_CLUSTER') - mock_policy.return_value = mock.Mock(id='X_POLICY') - mock_cp.return_value = None - req = orco.ClusterDetachPolicyRequest(identity='C1', policy_id='P1') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_detach, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The policy 'P1' is not attached to " - "the specified cluster 'C1'.", - str(ex.exc_info[1])) - mock_cluster.assert_called_once_with(self.ctx, 'C1') - mock_policy.assert_called_once_with(self.ctx, 'P1') - mock_cp.assert_called_once_with(self.ctx, 'X_CLUSTER', 'X_POLICY') - - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_update2(self, notify, mock_policy, mock_cluster, mock_cp, - mock_action): - mock_cluster.return_value = mock.Mock(id='12345678abcd') - mock_policy.return_value = mock.Mock(id='87654321abcd') - mock_action.return_value = 'ACTION_ID' - mock_cp.return_value = mock.Mock() - req = orco.ClusterUpdatePolicyRequest(identity='C1', policy_id='P1', - enabled=False) - - res = self.svc.cluster_policy_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - mock_cluster.assert_called_once_with(self.ctx, 'C1') - mock_policy.assert_called_once_with(self.ctx, 'P1') - mock_cp.assert_called_once_with(self.ctx, '12345678abcd', - '87654321abcd') - mock_action.assert_called_once_with( - self.ctx, '12345678abcd', consts.CLUSTER_UPDATE_POLICY, - name='update_policy_12345678', - cluster_id='12345678abcd', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY, - inputs={'policy_id': '87654321abcd', 'enabled': False}, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_update2_cluster_not_found(self, mock_cluster): - mock_cluster.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orco.ClusterUpdatePolicyRequest(identity='Bogus', policy_id='P1', - enabled=True) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_policy_update, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_cluster.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Policy, 'find') - def 
test_update2_policy_not_found(self, mock_policy, mock_cluster):
-        mock_cluster.return_value = mock.Mock()
-        mock_policy.side_effect = exc.ResourceNotFound(type='policy',
-                                                       id='Bogus')
-        req = orco.ClusterUpdatePolicyRequest(identity='C1', policy_id='Bogus',
-                                              enabled=True)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.cluster_policy_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("The specified policy 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_cluster.assert_called_once_with(self.ctx, 'C1')
-        mock_policy.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(cpo.ClusterPolicy, 'get')
-    @mock.patch.object(co.Cluster, 'find')
-    @mock.patch.object(po.Policy, 'find')
-    def test_update2_binding_not_found(self, mock_policy, mock_cluster,
-                                       mock_cp):
-        mock_cluster.return_value = mock.Mock(id='CLUSTER_ID1')
-        mock_policy.return_value = mock.Mock(id='POLICY_ID1')
-        mock_cp.return_value = None
-        req = orco.ClusterUpdatePolicyRequest(identity='C1', policy_id='P1',
-                                              enabled=True)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.cluster_policy_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("The policy 'P1' is not attached to the "
-                         "specified cluster 'C1'.",
-                         str(ex.exc_info[1]))
-
-        mock_cluster.assert_called_once_with(self.ctx, 'C1')
-        mock_policy.assert_called_once_with(self.ctx, 'P1')
-        mock_cp.assert_called_once_with(self.ctx, 'CLUSTER_ID1', 'POLICY_ID1')
diff --git a/senlin/tests/unit/conductor/service/test_clusters.py b/senlin/tests/unit/conductor/service/test_clusters.py
deleted file mode 100644
index e8c87fe11..000000000
--- a/senlin/tests/unit/conductor/service/test_clusters.py
+++ /dev/null
@@ -1,2188 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
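A reading aid for the stacked @mock.patch.object decorators used heavily in the deleted file below: patch decorators apply bottom-up, so the injected mocks arrive in the reverse order of the decorator list (the bottom-most patch is the first argument after self). A self-contained demonstration with illustrative Cluster/Policy classes, not the Senlin objects:

    import unittest
    from unittest import mock


    class Cluster:
        @staticmethod
        def find():
            return 'real cluster find'


    class Policy:
        @staticmethod
        def find():
            return 'real policy find'


    class TestDecoratorOrder(unittest.TestCase):
        # The bottom decorator (Cluster.find) is applied first, so its
        # mock is the first test argument; Policy.find's mock is second.
        @mock.patch.object(Policy, 'find')
        @mock.patch.object(Cluster, 'find')
        def test_order(self, mock_cluster_find, mock_policy_find):
            Cluster.find()
            mock_cluster_find.assert_called_once_with()
            mock_policy_find.assert_not_called()


    if __name__ == '__main__':
        unittest.main()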
- -from unittest import mock - -from oslo_config import cfg -from oslo_messaging.rpc import dispatcher as rpc -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import scaleutils as su -from senlin.common import utils as common_utils -from senlin.conductor import service -from senlin.engine.actions import base as am -from senlin.engine.actions import cluster_action as ca -from senlin.engine import dispatcher -from senlin.engine import node as nm -from senlin.objects import action as ao -from senlin.objects import base as obj_base -from senlin.objects import cluster as co -from senlin.objects import cluster_policy as cpo -from senlin.objects import node as no -from senlin.objects import profile as po -from senlin.objects import receiver as ro -from senlin.objects.requests import clusters as orco -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class ClusterTest(base.SenlinTestCase): - def setUp(self): - super(ClusterTest, self).setUp() - - self.ctx = utils.dummy_context(project='cluster_test_project') - self.svc = service.ConductorService('host-a', 'topic-a') - - @mock.patch.object(co.Cluster, 'count_all') - def test_check_cluster_quota(self, mock_count): - mock_count.return_value = 10 - cfg.CONF.set_override('max_clusters_per_project', 11) - - res = self.svc.check_cluster_quota(self.ctx) - - self.assertIsNone(res) - mock_count.assert_called_once_with(self.ctx) - - @mock.patch.object(co.Cluster, 'count_all') - def test_check_cluster_quota_failed(self, mock_count): - mock_count.return_value = 11 - cfg.CONF.set_override('max_clusters_per_project', 11) - - ex = self.assertRaises(exc.OverQuota, - self.svc.check_cluster_quota, self.ctx) - self.assertEqual("Quota exceeded for resources.", - str(ex)) - - def _prepare_request(self, req): - mock_cls = self.patchobject(obj_base.SenlinObject, - 'obj_class_from_name') - req.update({'senlin_object.name': 'RequestClass', - 'senlin_object.version': '1.0'}) - req_base = mock.Mock() - mock_cls.return_value = req_base - req_obj = mock.Mock() - for k, v in req.items(): - setattr(req_obj, k, v) - req_base.obj_from_primitive.return_value = req_obj - - @mock.patch.object(co.Cluster, 'get_all') - def test_cluster_list(self, mock_get): - x_obj_1 = mock.Mock() - x_obj_1.to_dict.return_value = {'k': 'v1'} - x_obj_2 = mock.Mock() - x_obj_2.to_dict.return_value = {'k': 'v2'} - mock_get.return_value = [x_obj_1, x_obj_2] - req = orco.ClusterListRequest(project_safe=True) - - result = self.svc.cluster_list(self.ctx, req.obj_to_primitive()) - - self.assertEqual([{'k': 'v1'}, {'k': 'v2'}], result) - mock_get.assert_called_once_with(self.ctx, project_safe=True) - - @mock.patch.object(co.Cluster, 'get_all') - def test_cluster_list_with_params(self, mock_get): - mock_get.return_value = [] - marker = uuidutils.generate_uuid() - req = { - 'limit': 10, - 'marker': marker, - 'name': ['test_cluster'], - 'status': ['ACTIVE'], - 'sort': 'name:asc', - 'project_safe': True - } - self._prepare_request(req) - - result = self.svc.cluster_list(self.ctx, req) - - self.assertEqual([], result) - mock_get.assert_called_once_with( - self.ctx, limit=10, marker=marker, sort='name:asc', - filters={'name': ['test_cluster'], 'status': ['ACTIVE']}, - project_safe=True) - - @mock.patch.object(service.ConductorService, 'check_cluster_quota') - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(am.Action, 'create') - 
@mock.patch.object(co.Cluster, "create") - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_create(self, notify, mock_profile, mock_cluster, - mock_action, mock_check, mock_quota): - x_profile = mock.Mock(id='PROFILE_ID') - mock_profile.return_value = x_profile - x_cluster = mock.Mock(id='12345678ABC') - x_cluster.to_dict.return_value = {'foo': 'bar'} - mock_cluster.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - mock_check.return_value = None - mock_quota.return_value = None - req = orco.ClusterCreateRequestBody(name='C1', profile_id='PROFILE', - desired_capacity=3) - - # do it - result = self.svc.cluster_create(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID', 'foo': 'bar'}, result) - mock_profile.assert_called_once_with(self.ctx, 'PROFILE') - mock_check.assert_called_once_with(None, 3, None, None, True) - mock_cluster.assert_called_once_with( - self.ctx, - dict(name='C1', desired_capacity=3, profile_id='PROFILE_ID', - min_size=0, max_size=-1, timeout=3600, metadata={}, - dependents={}, data={}, next_index=1, status='INIT', - config={}, - status_reason='Initializing', user=self.ctx.user_id, - project=self.ctx.project_id, domain=self.ctx.domain_id)) - mock_action.assert_called_once_with( - self.ctx, - '12345678ABC', 'CLUSTER_CREATE', - name='cluster_create_12345678', - cluster_id='12345678ABC', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - ) - notify.assert_called_once_with() - - @mock.patch.object(service.ConductorService, 'check_cluster_quota') - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, "create") - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_create_desired_null(self, notify, mock_profile, - mock_cluster, mock_action, - mock_check, mock_quota): - x_profile = mock.Mock(id='PROFILE_ID') - mock_profile.return_value = x_profile - x_cluster = mock.Mock(id='12345678ABC') - x_cluster.to_dict.return_value = {'foo': 'bar'} - mock_cluster.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - mock_check.return_value = None - mock_quota.return_value = None - req = orco.ClusterCreateRequestBody(name='C1', profile_id='PROFILE', - min_size=1, max_size=5, - config={'k1': 'v1'}) - - # do it - result = self.svc.cluster_create(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID', 'foo': 'bar'}, result) - mock_profile.assert_called_once_with(self.ctx, 'PROFILE') - mock_check.assert_called_once_with(None, 1, 1, 5, True) - mock_cluster.assert_called_once_with( - self.ctx, - dict(name='C1', desired_capacity=1, profile_id='PROFILE_ID', - min_size=1, max_size=5, timeout=3600, metadata={}, - dependents={}, data={}, next_index=1, status='INIT', - config={'k1': 'v1'}, - status_reason='Initializing', user=self.ctx.user_id, - project=self.ctx.project_id, domain=self.ctx.domain_id)) - mock_action.assert_called_once_with( - self.ctx, - '12345678ABC', 'CLUSTER_CREATE', - name='cluster_create_12345678', - cluster_id='12345678ABC', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - ) - notify.assert_called_once_with() - - @mock.patch.object(service.ConductorService, 'check_cluster_quota') - def test_cluster_create_exceeding_quota(self, mock_quota): - mock_quota.side_effect = exc.OverQuota() - req = {'profile_id': 'PROFILE', 'name': 'CLUSTER'} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - 
self.svc.cluster_create, - self.ctx, req) - - self.assertEqual(exc.OverQuota, ex.exc_info[0]) - self.assertEqual("Quota exceeded for resources.", - str(ex.exc_info[1])) - mock_quota.assert_called_once_with(self.ctx) - - @mock.patch.object(service.ConductorService, 'check_cluster_quota') - @mock.patch.object(co.Cluster, 'get_by_name') - def test_cluster_create_duplicate_name(self, mock_get, mock_quota): - cfg.CONF.set_override('name_unique', True) - mock_quota.return_value = None - mock_get.return_value = mock.Mock() - req = {'profile_id': 'PROFILE', 'name': 'CLUSTER'} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_create, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual(_("a cluster named 'CLUSTER' already exists."), - str(ex.exc_info[1])) - mock_get.assert_called_once_with(self.ctx, 'CLUSTER') - - @mock.patch.object(service.ConductorService, 'check_cluster_quota') - @mock.patch.object(po.Profile, 'find') - def test_cluster_create_profile_not_found(self, mock_find, mock_quota): - mock_quota.return_value = None - mock_find.side_effect = exc.ResourceNotFound(type='profile', - id='Bogus') - req = {'profile_id': 'Bogus', 'name': 'CLUSTER'} - self._prepare_request(req) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_create, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The specified profile 'Bogus' could not " - "be found.", str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(service.ConductorService, 'check_cluster_quota') - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(su, 'check_size_params') - def test_cluster_create_failed_checking(self, mock_check, mock_find, - mock_quota): - mock_quota.return_value = None - mock_find.return_value = mock.Mock() - mock_check.return_value = 'INVALID' - req = {'profile_id': 'PROFILE', 'name': 'CLUSTER'} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_create, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("INVALID.", str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'PROFILE') - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_get(self, mock_find): - x_cluster = mock.Mock() - x_cluster.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_cluster - project_safe = not self.ctx.is_admin - - req = orco.ClusterGetRequest(identity='C1') - - result = self.svc.cluster_get(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar'}, result) - mock_find.assert_called_once_with( - self.ctx, 'C1', project_safe=project_safe) - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_get_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = {'identity': 'CLUSTER'} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_get, - self.ctx, req) - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_update(self, notify, mock_find, mock_profile, - mock_action): - x_cluster = mock.Mock(id='12345678AB', status='ACTIVE', - profile_id='OLD_PROFILE', - metadata={'A': 'B'}) - x_cluster.to_dict.return_value = {'foo': 'bar'} - 
mock_find.return_value = x_cluster - old_profile = mock.Mock(type='FAKE_TYPE', id='ID_OLD') - new_profile = mock.Mock(type='FAKE_TYPE', id='ID_NEW') - mock_profile.side_effect = [old_profile, new_profile] - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterUpdateRequest(identity='FAKE_ID', name='new_name', - profile_id='NEW_PROFILE', - metadata={'B': 'A'}, timeout=120, - config={'k1': 'v1'}) - - # do it - result = self.svc.cluster_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID', 'foo': 'bar'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_ID') - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'OLD_PROFILE'), - mock.call(self.ctx, 'NEW_PROFILE'), - ]) - mock_action.assert_called_once_with( - self.ctx, '12345678AB', 'CLUSTER_UPDATE', - name='cluster_update_12345678', - cluster_id='12345678AB', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={ - 'new_profile_id': 'ID_NEW', - 'metadata': { - 'B': 'A', - }, - 'timeout': 120, - 'name': 'new_name', - 'config': { - 'k1': 'v1', - }, - } - ) - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_update_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = {'identity': 'Bogus', 'name': 'new-name'} - self._prepare_request(req) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_update, - self.ctx, req) - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_update_cluster_bad_status(self, mock_find): - x_cluster = mock.Mock(status='ERROR') - mock_find.return_value = x_cluster - req = {'identity': 'CLUSTER', 'name': 'new-name'} - self._prepare_request(req) - - self.assertEqual(consts.CS_ERROR, x_cluster.status) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_update, - self.ctx, req) - - self.assertEqual(exc.FeatureNotSupported, ex.exc_info[0]) - self.assertEqual('Updating a cluster in error state is not supported.', - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_update_profile_not_found(self, mock_find, mock_profile): - mock_find.return_value = mock.Mock(status='ACTIVE', - profile_id='OLD_ID') - mock_profile.side_effect = [ - mock.Mock(type='FAKE_TYPE', id='OLD_ID'), - exc.ResourceNotFound(type='profile', id='Bogus') - ] - req = orco.ClusterUpdateRequest(identity='CLUSTER', profile_id='Bogus') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_update, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The specified profile 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'OLD_ID'), - mock.call(self.ctx, 'Bogus'), - ]) - - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_update_diff_profile_type(self, mock_find, mock_profile): - x_obj = mock.Mock(status='ACTIVE', profile_id='OLD_ID') - mock_find.return_value = x_obj - mock_profile.side_effect = [ - mock.Mock(type='FAKE_TYPE', id='OLD_ID'), - mock.Mock(type='DIFF_TYPE', id='NEW_ID'), - ] - req = orco.ClusterUpdateRequest(identity='CLUSTER', - profile_id='NEW_PROFILE') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_update, - self.ctx, req.obj_to_primitive()) - - 
self.assertEqual(exc.BadRequest, ex.exc_info[0]) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'OLD_ID'), - mock.call(self.ctx, 'NEW_PROFILE'), - ]) - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_update_same_profile(self, notify, mock_find, - mock_profile, mock_action): - x_cluster = mock.Mock(id='12345678AB', status='ACTIVE', - profile_id='OLD_PROFILE') - x_cluster.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_cluster - old_profile = mock.Mock(type='FAKE_TYPE', id='ID_OLD') - new_profile = mock.Mock(type='FAKE_TYPE', id='ID_OLD') - mock_profile.side_effect = [old_profile, new_profile] - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterUpdateRequest(identity='FAKE_ID', name='NEW_NAME', - profile_id='NEW_PROFILE') - - # do it - result = self.svc.cluster_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID', 'foo': 'bar'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_ID') - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'OLD_PROFILE'), - mock.call(self.ctx, 'NEW_PROFILE'), - ]) - mock_action.assert_called_once_with( - self.ctx, '12345678AB', 'CLUSTER_UPDATE', - name='cluster_update_12345678', - cluster_id='12345678AB', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={ - # Note profile_id is not shown in the inputs - 'name': 'NEW_NAME', - }, - ) - notify.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_update_same_metadata(self, notify, mock_find, - mock_action): - x_cluster = mock.Mock(id='12345678AB', status='ACTIVE', - metadata={'K': 'V'}) - x_cluster.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterUpdateRequest(identity='FAKE_ID', name='NEW_NAME', - metadata={'K': 'V'}) - - # do it - result = self.svc.cluster_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID', 'foo': 'bar'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_ID') - mock_action.assert_called_once_with( - self.ctx, '12345678AB', 'CLUSTER_UPDATE', - name='cluster_update_12345678', - cluster_id='12345678AB', - status=am.Action.READY, - cause=consts.CAUSE_RPC, - inputs={ - # Note metadata is not included in the inputs - 'name': 'NEW_NAME', - }, - ) - notify.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_update_same_timeout(self, notify, mock_find, - mock_action): - x_cluster = mock.Mock(id='12345678AB', status='ACTIVE', - timeout=10) - x_cluster.to_dict.return_value = {'foo': 'bar'} - x_cluster.timeout = 10 - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterUpdateRequest(identity='FAKE_ID', name='NEW_NAME', - timeout=10) - - # do it - result = self.svc.cluster_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID', 'foo': 'bar'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_ID') - mock_action.assert_called_once_with( - self.ctx, '12345678AB', 'CLUSTER_UPDATE', - name='cluster_update_12345678', - cluster_id='12345678AB', - status=am.Action.READY, 
- cause=consts.CAUSE_RPC, - inputs={ - # Note timeout is not included in the inputs - 'name': 'NEW_NAME', - }, - ) - notify.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_update_same_name(self, notify, mock_find, - mock_action): - x_cluster = mock.Mock(id='12345678AB', status='ACTIVE', - name='OLD_NAME', timeout=10) - x_cluster.name = 'OLD_NAME' - x_cluster.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterUpdateRequest(identity='FAKE_ID', name='OLD_NAME', - timeout=100) - - # do it - result = self.svc.cluster_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID', 'foo': 'bar'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_ID') - mock_action.assert_called_once_with( - self.ctx, '12345678AB', 'CLUSTER_UPDATE', - name='cluster_update_12345678', - cluster_id='12345678AB', - status=am.Action.READY, - cause=consts.CAUSE_RPC, - inputs={ - # Note name is not included in the inputs - 'timeout': 100, - }, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_update_all_property_same(self, mock_find): - x_cluster = mock.Mock(id='12345678AB', status='ACTIVE', - name='OLD_NAME', timeout=10) - x_cluster.name = 'OLD_NAME' - x_cluster.timeout = 10 - mock_find.return_value = x_cluster - - # Notice that name and timeout are all not changed. - req = orco.ClusterUpdateRequest(identity='CLUSTER', name='OLD_NAME', - timeout=10) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_update, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual('', str(ex)) - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_update_no_property_updated(self, mock_find): - x_cluster = mock.Mock(status='ACTIVE', profile_id='OLD_ID') - mock_find.return_value = x_cluster - req = orco.ClusterUpdateRequest(identity='CLUSTER') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_update, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual('', str(ex)) - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_add_nodes_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = {'identity': 'Bogus', 'nodes': ['n1', 'n2']} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_add_nodes, - self.ctx, req) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(po.Profile, 'get') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_add_nodes(self, notify, mock_find, mock_node, - mock_profile, mock_action, mock_check): - x_cluster = mock.Mock(id='12345678AB', profile_id='FAKE_ID', - desired_capacity=4) - mock_find.return_value = x_cluster - mock_profile.return_value = mock.Mock(type='FAKE_TYPE') - x_node_1 = mock.Mock(id='NODE1', cluster_id='', status='ACTIVE', - profile_id='FAKE_ID_1') - x_node_2 = mock.Mock(id='NODE2', cluster_id='', 
status='ACTIVE', - profile_id='FAKE_ID_1') - mock_node.side_effect = [x_node_1, x_node_2] - mock_action.return_value = 'ACTION_ID' - mock_check.return_value = None - req = orco.ClusterAddNodesRequest(identity='C1', - nodes=['NODE_A', 'NODE_B']) - - result = self.svc.cluster_add_nodes(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_node.assert_has_calls([ - mock.call(self.ctx, 'NODE_A'), - mock.call(self.ctx, 'NODE_B'), - ]) - mock_check.assert_called_once_with(x_cluster, 6, strict=True) - mock_action.assert_called_once_with( - self.ctx, '12345678AB', consts.CLUSTER_ADD_NODES, - name='cluster_add_nodes_12345678', - cluster_id='12345678AB', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'nodes': ['NODE1', 'NODE2']}, - ) - self.assertEqual(3, mock_profile.call_count) - notify.assert_called_once_with() - - @mock.patch.object(po.Profile, 'get') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_add_nodes_nodes_not_found(self, mock_find, mock_node, - mock_profile): - mock_find.return_value = mock.Mock(id='1234', profile_id='FAKE_ID') - mock_profile.return_value = mock.Mock(type='FAKE_TYPE') - mock_node.side_effect = exc.ResourceNotFound(type='node', id='NODE1') - req = {'identity': 'CLUSTER', 'nodes': ['NODE1']} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_add_nodes, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Nodes not found: ['NODE1'].", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_profile.assert_called_once_with(self.ctx, 'FAKE_ID', - project_safe=True) - mock_node.assert_called_once_with(self.ctx, 'NODE1') - - @mock.patch.object(po.Profile, 'get') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_add_nodes_bad_status(self, mock_find, mock_node, - mock_profile): - mock_find.return_value = mock.Mock(id='1234', profile_id='FAKE_ID') - mock_profile.return_value = mock.Mock(type='FAKE_TYPE') - mock_node.return_value = mock.Mock( - id='NODE2', cluster_id='', status='ERROR') - req = {'identity': 'CLUSTER', 'nodes': ['NODE2']} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_add_nodes, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Nodes are not ACTIVE: ['NODE2'].", - str(ex.exc_info[1])) - - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - self.assertEqual(2, mock_profile.call_count) - mock_node.assert_called_once_with(self.ctx, 'NODE2') - - @mock.patch.object(po.Profile, 'get') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_add_nodes_node_already_owned(self, mock_find, - mock_node, mock_profile): - - mock_find.return_value = mock.Mock(id='1234', profile_id='FAKE_ID') - mock_profile.return_value = mock.Mock(type='FAKE_TYPE') - mock_node.return_value = mock.Mock(id='NODE3', status='ACTIVE', - cluster_id='OTHER') - req = {'identity': 'CLUSTER', 'nodes': ['NODE3']} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_add_nodes, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Nodes ['NODE3'] already owned by some cluster.", - str(ex.exc_info[1])) - - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - self.assertEqual(2, 
mock_profile.call_count) - mock_node.assert_called_once_with(self.ctx, 'NODE3') - - @mock.patch.object(po.Profile, 'get') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_add_nodes_node_profile_type_not_match( - self, mock_find, mock_node, mock_profile): - - mock_find.return_value = mock.Mock(id='1234', profile_id='FAKE_ID') - mock_profile.side_effect = [ - mock.Mock(type='FAKE_TYPE_1'), - mock.Mock(type='FAKE_TYPE_2'), - ] - mock_node.return_value = mock.Mock(id='NODE4', status='ACTIVE', - cluster_id='', profile_id='DIFF') - req = {'identity': 'CLUSTER', 'nodes': ['NODE4']} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_add_nodes, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Profile type of nodes ['NODE4'] does not " - "match that of the cluster.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'FAKE_ID', project_safe=True), - mock.call(self.ctx, 'DIFF', project_safe=True), - ]) - mock_node.assert_called_once_with(self.ctx, 'NODE4') - - @mock.patch.object(po.Profile, 'get') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_add_nodes_mult_err(self, mock_find, mock_node, - mock_profile): - mock_find.return_value = mock.Mock(id='1234', profile_id='FAKE_ID') - mock_profile.return_value = mock.Mock(type='FAKE_TYPE') - mock_node.return_value = mock.Mock(id='NODE2', status='ERROR') - req = {'identity': 'CLUSTER', 'nodes': ['NODE2']} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_add_nodes, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - msg1 = _("Nodes ['NODE2'] already owned by some cluster.") - msg2 = _("Nodes are not ACTIVE: ['NODE2'].") - self.assertIn(msg1, str(ex.exc_info[1])) - self.assertIn(msg2, str(ex.exc_info[1])) - - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - self.assertEqual(2, mock_profile.call_count) - mock_node.assert_called_once_with(self.ctx, 'NODE2') - - @mock.patch.object(po.Profile, 'get') - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_add_nodes_failed_checking(self, mock_find, mock_node, - mock_check, mock_profile): - x_cluster = mock.Mock(id='12345678AB', profile_id='FAKE_PROFILE', - desired_capacity=2) - mock_find.return_value = x_cluster - mock_profile.return_value = mock.Mock(type='FAKE_TYPE') - x_node_1 = mock.Mock(id='NODE1', cluster_id='', status='ACTIVE', - profile_id='FAKE_PROFILE_1') - x_node_2 = mock.Mock(id='NODE2', cluster_id='', status='ACTIVE', - profile_id='FAKE_PROFILE_2') - mock_node.side_effect = [x_node_1, x_node_2] - mock_check.return_value = 'Failed size checking.' 
- req = {'identity': 'C1', 'nodes': ['NODE_A', 'NODE_B']} - self._prepare_request(req) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_add_nodes, - self.ctx, req) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Failed size checking.", - str(ex.exc_info[1])) - - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'FAKE_PROFILE', project_safe=True), - mock.call(self.ctx, 'FAKE_PROFILE_1', project_safe=True), - mock.call(self.ctx, 'FAKE_PROFILE_2', project_safe=True), - ]) - mock_node.assert_has_calls([ - mock.call(self.ctx, 'NODE_A'), - mock.call(self.ctx, 'NODE_B'), - ]) - mock_check.assert_called_once_with(x_cluster, 4, strict=True) - - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_del_nodes(self, notify, mock_find, mock_node, - mock_action, mock_check): - x_cluster = mock.Mock(id='1234', desired_capacity=2) - mock_find.return_value = x_cluster - mock_node.return_value = mock.Mock(id='NODE2', cluster_id='1234', - dependents={}) - mock_check.return_value = None - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterDelNodesRequest(identity='CLUSTER', nodes=['NODE1']) - - result = self.svc.cluster_del_nodes(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_node.assert_called_once_with(self.ctx, 'NODE1') - mock_check.assert_called_once_with(x_cluster, 1, strict=True) - mock_action.assert_called_once_with( - self.ctx, '1234', consts.CLUSTER_DEL_NODES, - name='cluster_del_nodes_1234', - cluster_id='1234', - status=am.Action.READY, - cause=consts.CAUSE_RPC, - inputs={ - 'count': 1, - 'candidates': ['NODE2'], - }, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_del_nodes_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orco.ClusterDelNodesRequest(identity='Bogus', nodes=['NODE1']) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_del_nodes, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_del_nodes_node_not_found(self, mock_find, mock_node): - mock_find.return_value = mock.Mock() - mock_node.side_effect = exc.ResourceNotFound(type='node', id='NODE1') - req = orco.ClusterDelNodesRequest(identity='CLUSTER', nodes=['NODE1']) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_del_nodes, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertIn("Nodes not found", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_node.assert_called_once_with(self.ctx, 'NODE1') - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_del_nodes_have_containers(self, mock_cluster, mock_node): - mock_cluster.return_value = mock.Mock(id='CLUSTER1') - dependents = {'nodes': ['container1']} - node = mock.Mock(id='NODE1', dependents=dependents, - cluster_id='CLUSTER1') - 
mock_node.return_value = node - req = orco.ClusterDelNodesRequest(identity='CLUSTER', nodes=['NODE1']) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_del_nodes, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.ResourceInUse, ex.exc_info[0]) - message = _("nodes ['NODE1'] are depended by other nodes, so can't be " - "deleted or become orphan nodes") - self.assertIn(message, str(ex.exc_info[1])) - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_del_nodes_node_in_other_cluster(self, mock_find, - mock_node): - mock_find.return_value = mock.Mock(id='1234') - mock_node.return_value = mock.Mock(id='NODE2', cluster_id='5678') - req = orco.ClusterDelNodesRequest(identity='CLUSTER', nodes=['NODE2']) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_del_nodes, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Nodes not members of specified cluster: ['NODE2'].", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_node.assert_called_once_with(self.ctx, 'NODE2') - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_del_nodes_mult_errors(self, mock_find, mock_node): - mock_find.return_value = mock.Mock(id='1234') - mock_node.side_effect = [mock.Mock(id='NODE1', cluster_id='5678'), - exc.ResourceNotFound(type='node', id='NODE2')] - req = orco.ClusterDelNodesRequest(identity='CLUSTER', - nodes=['NODE1', 'NODE2']) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_del_nodes, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - msg1 = _("Nodes not found:") - msg2 = _("Nodes not members of specified cluster: ['NODE1'].") - self.assertIn(msg1, str(ex.exc_info[1])) - self.assertIn(msg2, str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - self.assertEqual(2, mock_node.call_count) - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_del_nodes_orphan_nodes(self, mock_find, mock_node): - mock_find.return_value = mock.Mock(id='1234') - mock_node.return_value = mock.Mock(id='NODE3', cluster_id='') - req = orco.ClusterDelNodesRequest(identity='CLUSTER', nodes=['NODE3']) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_del_nodes, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Nodes not members of specified cluster: ['NODE3'].", - str(ex.exc_info[1])) - - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_node.assert_called_once_with(self.ctx, 'NODE3') - - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_del_nodes_failed_checking(self, mock_find, mock_node, - mock_check): - x_cluster = mock.Mock(id='1234', desired_capacity=2) - mock_find.return_value = x_cluster - mock_node.return_value = mock.Mock(id='NODE2', cluster_id='1234', - dependents={}) - mock_check.return_value = 'Failed size checking.' 
- req = orco.ClusterDelNodesRequest(identity='CLUSTER', nodes=['NODE3']) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_del_nodes, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Failed size checking.", - str(ex.exc_info[1])) - - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_node.assert_called_once_with(self.ctx, 'NODE3') - mock_check.assert_called_once_with(x_cluster, 1, strict=True) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(su, 'calculate_desired') - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_resize_exact_capacity(self, mock_find, mock_action, - notify, mock_check, mock_calc, - mock_count): - x_cluster = mock.Mock(id='12345678ABCDEFGH') - mock_find.return_value = x_cluster - mock_count.return_value = 3 - mock_calc.return_value = 5 - mock_check.return_value = None - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterResizeRequest( - identity='CLUSTER', - adjustment_type=consts.EXACT_CAPACITY, - number=5 - ) - - res = self.svc.cluster_resize(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_calc.assert_called_once_with(3, consts.EXACT_CAPACITY, 5, None) - mock_check.assert_called_once_with(x_cluster, 5, None, None, True) - mock_action.assert_called_once_with( - self.ctx, '12345678ABCDEFGH', consts.CLUSTER_RESIZE, - name='cluster_resize_12345678', - cluster_id='12345678ABCDEFGH', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={ - consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY, - consts.ADJUSTMENT_NUMBER: 5, - consts.ADJUSTMENT_MIN_SIZE: None, - consts.ADJUSTMENT_MAX_SIZE: None, - consts.ADJUSTMENT_MIN_STEP: None, - consts.ADJUSTMENT_STRICT: True - }, - ) - notify.assert_called_once_with() - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(su, 'calculate_desired') - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_resize_change_in_capacity(self, mock_find, mock_action, - notify, mock_check, mock_calc, - mock_count): - x_cluster = mock.Mock(id='12345678ABCDEFGH') - mock_find.return_value = x_cluster - mock_count.return_value = 2 - mock_calc.return_value = 7 - mock_check.return_value = None - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterResizeRequest( - identity='CLUSTER', - adjustment_type=consts.CHANGE_IN_CAPACITY, - number=5 - ) - - res = self.svc.cluster_resize(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_calc.assert_called_once_with(2, consts.CHANGE_IN_CAPACITY, 5, - None) - mock_check.assert_called_once_with(x_cluster, 7, None, None, True) - mock_action.assert_called_once_with( - self.ctx, '12345678ABCDEFGH', consts.CLUSTER_RESIZE, - name='cluster_resize_12345678', - cluster_id='12345678ABCDEFGH', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={ - consts.ADJUSTMENT_TYPE: consts.CHANGE_IN_CAPACITY, - consts.ADJUSTMENT_NUMBER: 5, - consts.ADJUSTMENT_MIN_SIZE: None, - consts.ADJUSTMENT_MAX_SIZE: None, - consts.ADJUSTMENT_MIN_STEP: None, - consts.ADJUSTMENT_STRICT: True - }, - ) - 
notify.assert_called_once_with() - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(su, 'calculate_desired') - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_resize_change_in_percentage(self, mock_find, mock_action, - notify, mock_check, - mock_calc, mock_count): - x_cluster = mock.Mock(id='12345678ABCDEFGH') - mock_find.return_value = x_cluster - mock_count.return_value = 10 - mock_calc.return_value = 8 - mock_check.return_value = None - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterResizeRequest( - identity='CLUSTER', - adjustment_type=consts.CHANGE_IN_PERCENTAGE, - number=15.81 - ) - - res = self.svc.cluster_resize(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_calc.assert_called_once_with(10, consts.CHANGE_IN_PERCENTAGE, - 15.81, None) - mock_check.assert_called_once_with(x_cluster, 8, None, None, True) - mock_action.assert_called_once_with( - self.ctx, '12345678ABCDEFGH', consts.CLUSTER_RESIZE, - name='cluster_resize_12345678', - cluster_id='12345678ABCDEFGH', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={ - consts.ADJUSTMENT_TYPE: consts.CHANGE_IN_PERCENTAGE, - consts.ADJUSTMENT_NUMBER: 15.81, - consts.ADJUSTMENT_MIN_SIZE: None, - consts.ADJUSTMENT_MAX_SIZE: None, - consts.ADJUSTMENT_MIN_STEP: None, - consts.ADJUSTMENT_STRICT: True - }, - ) - notify.assert_called_once_with() - - def test_cluster_resize_type_missing_number(self): - req = orco.ClusterResizeRequest( - identity='CLUSTER', - adjustment_type=consts.EXACT_CAPACITY - ) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_resize, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Missing number value for size adjustment.", - str(ex.exc_info[1])) - - def test_cluster_resize_number_without_type(self): - req = orco.ClusterResizeRequest( - identity='CLUSTER', - number=10 - ) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_resize, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Missing adjustment_type " - "value for size adjustment.", - str(ex.exc_info[1])) - - def test_cluster_resize_bad_number_for_exact_capacity(self): - req = orco.ClusterResizeRequest( - identity='CLUSTER', - adjustment_type=consts.EXACT_CAPACITY, - number=-5 - ) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_resize, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The 'number' must be non-negative integer for " - "adjustment type 'EXACT_CAPACITY'.", - str(ex.exc_info[1])) - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_resize_cluster_not_found(self, mock_find): - req = orco.ClusterResizeRequest( - identity='CLUSTER', - adjustment_type=consts.EXACT_CAPACITY, - number=10 - ) - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='CLUSTER') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_resize, - self.ctx, req.obj_to_primitive()) - - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'CLUSTER' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - - 
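The resize tests above only verify that the conductor forwards (current, adjustment_type, number, min_step) into the mocked su.calculate_desired and hands its result to the size check; they pin down nothing about the arithmetic itself (the percentage case simply returns the mocked 8). A plausible sketch of such a calculation, consistent with the mocked pass-through values but an illustrative assumption rather than the retired module's code:

    # Assumed desired-capacity calculation matching the
    # (current, adjustment_type, number, min_step) call signature above.
    EXACT_CAPACITY = 'EXACT_CAPACITY'
    CHANGE_IN_CAPACITY = 'CHANGE_IN_CAPACITY'
    CHANGE_IN_PERCENTAGE = 'CHANGE_IN_PERCENTAGE'


    def calculate_desired(current, adj_type, number, min_step):
        if adj_type == EXACT_CAPACITY:
            return int(number)
        if adj_type == CHANGE_IN_CAPACITY:
            return current + int(number)
        # CHANGE_IN_PERCENTAGE: adjust by a percentage of the current
        # size, bumping a zero delta up to min_step when one is given.
        delta = int(current * number / 100.0)
        if delta == 0 and min_step:
            delta = min_step if number > 0 else -min_step
        return current + delta


    # Consistent with the pass-through values asserted in the tests:
    assert calculate_desired(3, EXACT_CAPACITY, 5, None) == 5
    assert calculate_desired(2, CHANGE_IN_CAPACITY, 5, None) == 7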
@mock.patch.object(su, 'check_size_params') - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_resize_failing_size_check(self, mock_find, mock_count, - mock_check): - x_cluster = mock.Mock(id='CID') - mock_find.return_value = x_cluster - mock_count.return_value = 5 - mock_check.return_value = 'size check.' - req = orco.ClusterResizeRequest( - identity='CLUSTER', - adjustment_type=consts.EXACT_CAPACITY, - number=5 - ) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_resize, - self.ctx, req.obj_to_primitive()) - - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_count.assert_called_once_with(self.ctx, 'CID') - mock_check.assert_called_once_with(x_cluster, 5, None, None, True) - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("size check.", - str(ex.exc_info[1])) - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_out(self, mock_find, mock_check, mock_action, - notify): - x_cluster = mock.Mock(id='12345678ABCDEFGH', desired_capacity=4) - mock_find.return_value = x_cluster - mock_check.return_value = None - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterScaleOutRequest(identity='CLUSTER', count=1) - - result = self.svc.cluster_scale_out(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_check.assert_called_once_with(x_cluster, 5) - mock_action.assert_called_once_with( - self.ctx, '12345678ABCDEFGH', consts.CLUSTER_SCALE_OUT, - name='cluster_scale_out_12345678', - cluster_id='12345678ABCDEFGH', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'count': 1}, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_out_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orco.ClusterScaleOutRequest(identity='Bogus', count=1) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_scale_out, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_out_count_is_none(self, mock_find, mock_action, - notify): - mock_find.return_value = mock.Mock(id='12345678ABCDEFGH', - desired_capacity=4) - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterScaleOutRequest(identity='CLUSTER') - - result = self.svc.cluster_scale_out(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_action.assert_called_once_with( - self.ctx, '12345678ABCDEFGH', consts.CLUSTER_SCALE_OUT, - name='cluster_scale_out_12345678', - cluster_id='12345678ABCDEFGH', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={}, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_out_count_zero(self, mock_find): - mock_find.return_value = mock.Mock(desired_capacity=4) - req = orco.ClusterScaleOutRequest(identity='CLUSTER', count=0) - - ex = 
self.assertRaises(rpc.ExpectedException, - self.svc.cluster_scale_out, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Count for scale-out request cannot be 0.", - str(ex.exc_info[1])) - - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_out_failed_size_check(self, mock_find, mock_check): - x_cluster = mock.Mock(desired_capacity=4) - mock_find.return_value = x_cluster - mock_check.return_value = 'size limit' - req = orco.ClusterScaleOutRequest(identity='CLUSTER', count=2) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_scale_out, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("size limit.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_check.assert_called_once_with(x_cluster, 6) - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_in(self, mock_find, mock_check, mock_action, - notify): - x_cluster = mock.Mock(id='12345678ABCD', desired_capacity=4) - mock_find.return_value = x_cluster - mock_check.return_value = None - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterScaleInRequest(identity='CLUSTER', count=2) - - result = self.svc.cluster_scale_in(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_check.assert_called_once_with(x_cluster, 2) - mock_action.assert_called_once_with( - self.ctx, '12345678ABCD', consts.CLUSTER_SCALE_IN, - name='cluster_scale_in_12345678', - cluster_id='12345678ABCD', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'count': 2}, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_in_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orco.ClusterScaleInRequest(identity='Bogus', count=2) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_scale_in, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_in_count_is_none(self, mock_find, mock_action, - notify): - mock_find.return_value = mock.Mock(id='FOO', desired_capacity=4) - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterScaleInRequest(identity='CLUSTER') - - result = self.svc.cluster_scale_in(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_action.assert_called_once_with( - self.ctx, 'FOO', consts.CLUSTER_SCALE_IN, - name='cluster_scale_in_FOO', - cluster_id='FOO', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={}, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_in_count_zero(self, mock_find): - mock_find.return_value = mock.Mock(desired_capacity=4) - req = orco.ClusterScaleInRequest(identity='CLUSTER', count=0) - - ex = 
self.assertRaises(rpc.ExpectedException, - self.svc.cluster_scale_in, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Count for scale-in request cannot be 0.", - str(ex.exc_info[1])) - - @mock.patch.object(su, 'check_size_params') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_scale_in_failed_size_check(self, mock_find, mock_check): - x_cluster = mock.Mock(desired_capacity=4) - mock_find.return_value = x_cluster - mock_check.return_value = 'size limit' - req = orco.ClusterScaleInRequest(identity='FAKE_CLUSTER', count=2) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_scale_in, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("size limit.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_check.assert_called_once_with(x_cluster, 2) - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_check(self, notify, mock_find, mock_action): - x_cluster = mock.Mock(id='CID', user='USER', project='PROJECT') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterCheckRequest(identity='C1', params={'foo': 'bar'}) - - res = self.svc.cluster_check(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_CHECK, - name='cluster_check_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'foo': 'bar'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(ao.Action, 'delete_by_target') - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_check_with_delete(self, notify, mock_find, mock_action, - mock_delete): - x_cluster = mock.Mock(id='CID', user='USER', project='PROJECT') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterCheckRequest(identity='C1', - params={'delete_check_action': True}) - - res = self.svc.cluster_check(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_delete.assert_called_once_with( - self.ctx, 'CID', action=['CLUSTER_CHECK'], - status=['SUCCEEDED', 'FAILED'] - ) - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_CHECK, - name='cluster_check_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'delete_check_action': True}, - ) - notify.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_check_user_is_none(self, notify, mock_find, mock_action): - x_cluster = mock.Mock(id='CID', project='PROJECT') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterCheckRequest(identity='C1') - - result = self.svc.cluster_check(self.ctx, req.obj_to_primitive()) - - self.assertIsNotNone(x_cluster.user) - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_CHECK, - name='cluster_check_CID', - 
cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={}, - ) - notify.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_check_project_is_none(self, notify, mock_find, - mock_action): - x_cluster = mock.Mock(id='CID', user='USER') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterCheckRequest(identity='C1') - - result = self.svc.cluster_check(self.ctx, req.obj_to_primitive()) - - self.assertIsNotNone(x_cluster.user) - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_CHECK, - name='cluster_check_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={}, - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_check_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orco.ClusterCheckRequest(identity='C1', params={'foo': 'bar'}) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_check, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'C1') - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_recover(self, notify, mock_find, mock_action): - x_cluster = mock.Mock(id='CID') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterRecoverRequest(identity='C1', - params={'operation': 'RECREATE'}) - - result = self.svc.cluster_recover(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_RECOVER, - name='cluster_recover_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'operation': 'RECREATE'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_recover_rebuild(self, notify, mock_find, mock_action): - x_cluster = mock.Mock(id='CID') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterRecoverRequest(identity='C1', - params={'operation': 'REBUILD'}) - - result = self.svc.cluster_recover(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_RECOVER, - name='cluster_recover_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'operation': 'REBUILD'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_recover_reboot(self, notify, mock_find, mock_action): - x_cluster = mock.Mock(id='CID') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = 
orco.ClusterRecoverRequest(identity='C1', - params={'operation': 'REBOOT'}) - - result = self.svc.cluster_recover(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_RECOVER, - name='cluster_recover_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'operation': 'REBOOT'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_recover_default(self, notify, mock_find, mock_action): - x_cluster = mock.Mock(id='CID') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterRecoverRequest(identity='C1') - - result = self.svc.cluster_recover(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_RECOVER, - name='cluster_recover_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={} - ) - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_recover_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orco.ClusterRecoverRequest(identity='Bogus') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_recover, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_recover_invalid(self, mock_find): - x_cluster = mock.Mock(id='CID') - mock_find.return_value = x_cluster - - req = orco.ClusterRecoverRequest(identity='Bogus', - params={'bad': 'fake'}) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_recover, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Action parameter ['bad'] is not recognizable.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_recover_invalid_operation(self, mock_find): - x_cluster = mock.Mock(id='CID') - mock_find.return_value = x_cluster - - req = orco.ClusterRecoverRequest(identity='Bogus', - params={'operation': 'fake'}) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_recover, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Operation value 'fake' has to be one of the " - "following: REBOOT, REBUILD, RECREATE.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_recover_invalid_operation_params(self, mock_find): - x_cluster = mock.Mock(id='CID') - mock_find.return_value = x_cluster - - req = orco.ClusterRecoverRequest( - identity='Bogus', - params={'operation': 'reboot', - 'operation_params': {'type': 'blah'} - } - ) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_recover, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Type field 'blah' in 
operation_params has to be one " - "of the following: SOFT, HARD.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_recover_user_is_none(self, notify, mock_find, - mock_action): - x_cluster = mock.Mock(id='CID', project='PROJECT') - mock_find.return_value = x_cluster - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterRecoverRequest(identity='C1') - - result = self.svc.cluster_recover(self.ctx, req.obj_to_primitive()) - - self.assertIsNotNone(x_cluster.user) - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'C1') - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_RECOVER, - name='cluster_recover_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={}, - ) - notify.assert_called_once_with() - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'get') - def test_validate_replace_nodes(self, mock_profile, mock_node): - cluster = mock.Mock(id='CID', profile_id='FAKE_ID') - mock_profile.return_value = mock.Mock(type='FAKE_TYPE') - mock_node.side_effect = [ - mock.Mock(id='OLD_ID', cluster_id='CID'), - mock.Mock(id='NEW_ID', cluster_id='', status=consts.NS_ACTIVE, - profile_id='FAKE_ID_1') - ] - - # do it - res = self.svc._validate_replace_nodes(self.ctx, cluster, - {'OLD_NODE': 'NEW_NODE'}) - - self.assertEqual({'OLD_ID': 'NEW_ID'}, res) - mock_node.assert_has_calls([ - mock.call(self.ctx, 'OLD_NODE'), - mock.call(self.ctx, 'NEW_NODE'), - ]) - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'FAKE_ID', project_safe=True), - mock.call(self.ctx, 'FAKE_ID_1', project_safe=True) - ]) - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'get') - def test_validate_replace_nodes_old_missing(self, mock_profile, - mock_node): - c = mock.Mock(id='CID', profile_id='FAKE_ID') - mock_node.side_effect = exc.ResourceNotFound(type='node', id='OLD') - - # do it - ex = self.assertRaises(exc.BadRequest, - self.svc._validate_replace_nodes, - self.ctx, c, {'OLD': 'NEW'}) - - self.assertIn("Original nodes not found: ['OLD']", str(ex)) - mock_node.assert_called_once_with(self.ctx, 'OLD') - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'get') - def test_validate_replace_nodes_new_missing(self, mock_profile, - mock_node): - c = mock.Mock(id='CID', profile_id='FAKE_ID') - mock_node.side_effect = [ - mock.Mock(), - exc.ResourceNotFound(type='node', id='NEW') - ] - - # do it - ex = self.assertRaises(exc.BadRequest, - self.svc._validate_replace_nodes, - self.ctx, c, {'OLD': 'NEW'}) - - self.assertIn("Replacement nodes not found: ['NEW']", - str(ex)) - mock_node.assert_has_calls([ - mock.call(self.ctx, 'OLD'), - mock.call(self.ctx, 'NEW') - ]) - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'get') - def test_validate_replace_nodes_old_not_member(self, mock_profile, - mock_node): - c = mock.Mock(id='CID', profile_id='FAKE_ID') - mock_node.side_effect = [ - mock.Mock(cluster_id='OTHER'), - mock.Mock(cluster_id=''), - ] - - # do it - ex = self.assertRaises(exc.BadRequest, - self.svc._validate_replace_nodes, - self.ctx, c, {'OLD': 'NEW'}) - - self.assertIn("The specified nodes ['OLD'] to be replaced are not " - "members of the cluster CID.", str(ex)) - mock_node.assert_has_calls([ - mock.call(self.ctx, 'OLD'), - mock.call(self.ctx, 'NEW') - ]) - 
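The _validate_replace_nodes cases above and below pin down four rules: the node being replaced must belong to the cluster, its replacement must not belong to any cluster, the replacement must be ACTIVE, and the replacement's profile type must match the cluster's, with every violation collected before the request is rejected (test_validate_replace_nodes_mult_err asserts two messages at once). The following is a minimal, self-contained sketch of that rule set, not the deleted Senlin method itself; Node, nodes, pairs and BadRequest here are placeholder stand-ins.

from dataclasses import dataclass


class BadRequest(Exception):
    pass


@dataclass
class Node:
    id: str
    cluster_id: str
    status: str
    profile_type: str


def validate_replace_nodes(cluster_id, cluster_profile_type, pairs, nodes):
    """Return {old_id: new_id}; collect every violation before failing.

    pairs maps old node IDs to replacement node IDs; nodes maps IDs to
    Node records (a stand-in for looking each one up in the database).
    """
    errors = []
    result = {}
    for old_id, new_id in pairs.items():
        old, new = nodes[old_id], nodes[new_id]
        if old.cluster_id != cluster_id:
            errors.append("The specified nodes ['%s'] to be replaced are "
                          "not members of the cluster %s."
                          % (old_id, cluster_id))
        if new.cluster_id:
            errors.append("Nodes ['%s'] already member of a cluster."
                          % new_id)
        if new.status != 'ACTIVE':
            errors.append("Nodes are not ACTIVE: ['%s']." % new_id)
        if new.profile_type != cluster_profile_type:
            errors.append("Profile type of nodes ['%s'] do not match that "
                          "of the cluster." % new_id)
        result[old.id] = new.id
    if errors:
        # Several messages can surface together, which is why the
        # mult_err test uses assertIn rather than assertEqual.
        raise BadRequest('\n'.join(errors))
    return result

The real method additionally resolves identities through no.Node.find and po.Profile.get, which is where the "Original/Replacement nodes not found" messages asserted above come from.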
- @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'get') - def test_validate_replace_nodes_new_not_orphan(self, mock_profile, - mock_node): - c = mock.Mock(id='CID', profile_id='FAKE_ID') - mock_node.side_effect = [ - mock.Mock(cluster_id='CID'), - mock.Mock(cluster_id='OTHER'), - ] - - # do it - ex = self.assertRaises(exc.BadRequest, - self.svc._validate_replace_nodes, - self.ctx, c, {'OLD': 'NEW'}) - - self.assertIn("Nodes ['NEW'] already member of a cluster.", - str(ex)) - mock_node.assert_has_calls([ - mock.call(self.ctx, 'OLD'), - mock.call(self.ctx, 'NEW') - ]) - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'get') - def test_validate_replace_nodes_new_bad_status(self, mock_profile, - mock_node): - c = mock.Mock(id='CID', profile_id='FAKE_ID') - mock_node.side_effect = [ - mock.Mock(cluster_id='CID'), - mock.Mock(cluster_id='', status=consts.NS_ERROR), - ] - - # do it - ex = self.assertRaises(exc.BadRequest, - self.svc._validate_replace_nodes, - self.ctx, c, {'OLD': 'NEW'}) - - self.assertIn("Nodes are not ACTIVE: ['NEW'].", str(ex)) - mock_node.assert_has_calls([ - mock.call(self.ctx, 'OLD'), - mock.call(self.ctx, 'NEW') - ]) - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'get') - def test_validate_replace_nodes_mult_err(self, mock_profile, - mock_node): - c = mock.Mock(id='CID', profile_id='FAKE_ID') - mock_node.side_effect = [ - mock.Mock(id='OLD1', cluster_id='CID'), - mock.Mock(id='NEW1', cluster_id='OTHER', status=consts.NS_ERROR), - ] - - # do it - ex = self.assertRaises(exc.BadRequest, - self.svc._validate_replace_nodes, - self.ctx, c, {'OLD1': 'NEW1'}) - - msg1 = _("Nodes ['NEW1'] already member of a cluster.") - msg2 = _("Nodes are not ACTIVE: ['NEW1'].") - self.assertIn(msg1, str(ex)) - self.assertIn(msg2, str(ex)) - mock_node.assert_has_calls([ - mock.call(self.ctx, 'OLD1'), - mock.call(self.ctx, 'NEW1'), - ]) - - @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'get') - def test_validate_replace_nodes_new_profile_type_mismatch( - self, mock_profile, mock_node): - c = mock.Mock(id='CID', profile_id='FAKE_CLUSTER_PROFILE') - mock_profile.side_effect = [ - mock.Mock(type='FAKE_TYPE'), # for cluster - mock.Mock(type='FAKE_TYPE_1'), # for node - ] - mock_node.side_effect = [ - mock.Mock(cluster_id='CID'), - mock.Mock(cluster_id='', status=consts.NS_ACTIVE, - profile_id='FAKE_NODE_PROFILE'), - ] - - # do it - ex = self.assertRaises(exc.BadRequest, - self.svc._validate_replace_nodes, - self.ctx, c, {'OLD': 'NEW'}) - - self.assertIn("Profile type of nodes ['NEW'] do not match that of " - "the cluster.", str(ex)) - mock_node.assert_has_calls([ - mock.call(self.ctx, 'OLD'), - mock.call(self.ctx, 'NEW') - ]) - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'FAKE_CLUSTER_PROFILE', project_safe=True), - mock.call(self.ctx, 'FAKE_NODE_PROFILE', project_safe=True) - ]) - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(service.ConductorService, '_validate_replace_nodes') - @mock.patch.object(no.Node, 'find') - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_replace_nodes(self, notify, mock_find, - mock_profile, mock_node, - mock_validate, mock_action): - cluster = mock.Mock(id='CID', profile_id='FAKE_ID') - mock_find.return_value = cluster - mock_profile.return_value = mock.Mock(type='FAKE_TYPE') - old_node = mock.Mock(id='ORIGIN', cluster_id='CID', status='ACTIVE') - new_node 
= mock.Mock(id='REPLACE', cluster_id='', status='ACTIVE', - profile_id='FAKE_ID_1') - mock_node.side_effect = [old_node, new_node] - mock_action.return_value = 'ACTION_ID' - param = {'ORIGINAL': 'REPLACE'} - mock_validate.return_value = param - req = orco.ClusterReplaceNodesRequest(identity='CLUSTER', nodes=param) - - # do it - res = self.svc.cluster_replace_nodes(self.ctx, req.obj_to_primitive()) - - # verify - self.assertEqual({'action': 'ACTION_ID'}, res) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_validate.assert_called_once_with(self.ctx, cluster, param) - mock_action.assert_called_once_with( - self.ctx, 'CID', consts.CLUSTER_REPLACE_NODES, - name='cluster_replace_nodes_CID', - cluster_id='CID', - cause=consts.CAUSE_RPC, - status=am.Action.READY, - inputs={'candidates': {'ORIGINAL': 'REPLACE'}}) - notify.assert_called_once_with() - - @mock.patch.object(service.ConductorService, '_validate_replace_nodes') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_replace_nodes_failed_validate(self, mock_find, mock_chk): - nodes = {'OLD': 'NEW'} - cluster = mock.Mock() - mock_find.return_value = cluster - mock_chk.side_effect = exc.BadRequest(msg='failed') - req = orco.ClusterReplaceNodesRequest(identity='CLUSTER', nodes=nodes) - - # do it - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_replace_nodes, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("failed.", str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER') - mock_chk.assert_called_once_with(self.ctx, cluster, nodes) - - @mock.patch.object(nm.Node, 'load') - @mock.patch.object(no.Node, 'get_all_by_cluster') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_collect(self, mock_find, mock_get, mock_load): - x_cluster = mock.Mock(id='FAKE_CLUSTER') - mock_find.return_value = x_cluster - x_obj_1 = mock.Mock(id='NODE1', physical_id='PHYID1') - x_obj_1.to_dict.return_value = {'name': 'node1'} - x_obj_2 = mock.Mock(id='NODE2', physical_id='PHYID2') - x_obj_2.to_dict.return_value = {'name': 'node2'} - x_node_1 = mock.Mock() - x_node_2 = mock.Mock() - x_node_1.get_details.return_value = {'ip': '1.2.3.4'} - x_node_2.get_details.return_value = {'ip': '5.6.7.8'} - mock_get.return_value = [x_obj_1, x_obj_2] - mock_load.side_effect = [x_node_1, x_node_2] - req = orco.ClusterCollectRequest(identity='CLUSTER_ID', - path='details.ip') - - res = self.svc.cluster_collect(self.ctx, req.obj_to_primitive()) - - self.assertIn('cluster_attributes', res) - self.assertIn({'id': 'NODE1', 'value': '1.2.3.4'}, - res['cluster_attributes']) - self.assertIn({'id': 'NODE2', 'value': '5.6.7.8'}, - res['cluster_attributes']) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER_ID') - mock_get.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_load.assert_has_calls([ - mock.call(self.ctx, db_node=x_obj_1), - mock.call(self.ctx, db_node=x_obj_2) - ]) - x_obj_1.to_dict.assert_called_once_with() - x_node_1.get_details.assert_called_once_with(self.ctx) - x_obj_2.to_dict.assert_called_once_with() - x_node_2.get_details.assert_called_once_with(self.ctx) - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(common_utils, 'get_path_parser') - def test_cluster_collect_bad_path(self, mock_parser, mock_find): - mock_parser.side_effect = exc.BadRequest(msg='Boom') - req = orco.ClusterCollectRequest(identity='CLUSTER_ID', path='foo.bar') - - err = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_collect, - self.ctx, 
req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, err.exc_info[0]) - mock_parser.assert_called_once_with('foo.bar') - self.assertEqual(0, mock_find.call_count) - - @mock.patch.object(no.Node, 'get_all_by_cluster') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_collect_cluster_not_found(self, mock_find, mock_get): - cid = 'FAKE_CLUSTER' - mock_find.side_effect = exc.ResourceNotFound(type='cluster', id=cid) - req = orco.ClusterCollectRequest(identity=cid, path='foo.bar') - - err = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_collect, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, err.exc_info[0]) - mock_find.assert_called_once_with(self.ctx, cid) - self.assertEqual(0, mock_get.call_count) - - @mock.patch.object(no.Node, 'get_all_by_cluster') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_collect_no_nodes(self, mock_find, mock_get): - x_cluster = mock.Mock(id='FAKE_CLUSTER') - mock_find.return_value = x_cluster - mock_get.return_value = [] - req = orco.ClusterCollectRequest(identity='CLUSTER_ID', path='barr') - - res = self.svc.cluster_collect(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'cluster_attributes': []}, res) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER_ID') - mock_get.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - - @mock.patch.object(no.Node, 'get_all_by_cluster') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_collect_no_details(self, mock_find, mock_get): - x_cluster = mock.Mock(id='FAKE_CLUSTER') - mock_find.return_value = x_cluster - x_node_1 = mock.Mock(id='NODE1', physical_id=None) - x_node_1.to_dict.return_value = {'name': 'node1'} - x_node_2 = mock.Mock(id='NODE2', physical_id=None) - x_node_2.to_dict.return_value = {'name': 'node2'} - mock_get.return_value = [x_node_1, x_node_2] - req = orco.ClusterCollectRequest(identity='CLUSTER_ID', path='name') - - res = self.svc.cluster_collect(self.ctx, req.obj_to_primitive()) - - self.assertIn('cluster_attributes', res) - self.assertIn({'id': 'NODE1', 'value': 'node1'}, - res['cluster_attributes']) - self.assertIn({'id': 'NODE2', 'value': 'node2'}, - res['cluster_attributes']) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER_ID') - mock_get.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - x_node_1.to_dict.assert_called_once_with() - self.assertEqual(0, x_node_1.get_details.call_count) - x_node_2.to_dict.assert_called_once_with() - self.assertEqual(0, x_node_2.get_details.call_count) - - @mock.patch.object(no.Node, 'get_all_by_cluster') - @mock.patch.object(co.Cluster, 'find') - def test_cluster_collect_no_match(self, mock_find, mock_get): - x_cluster = mock.Mock(id='FAKE_CLUSTER') - mock_find.return_value = x_cluster - x_node_1 = mock.Mock(physical_id=None) - x_node_1.to_dict.return_value = {'name': 'node1'} - x_node_2 = mock.Mock(physical_id=None) - x_node_2.to_dict.return_value = {'name': 'node2'} - mock_get.return_value = [x_node_1, x_node_2] - req = orco.ClusterCollectRequest(identity='CLUSTER_ID', path='bogus') - - res = self.svc.cluster_collect(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'cluster_attributes': []}, res) - mock_find.assert_called_once_with(self.ctx, 'CLUSTER_ID') - mock_get.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - x_node_1.to_dict.assert_called_once_with() - self.assertEqual(0, x_node_1.get_details.call_count) - x_node_2.to_dict.assert_called_once_with() - self.assertEqual(0, x_node_2.get_details.call_count) - - @mock.patch.object(am.Action, 'create') - 
@mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_delete(self, notify, mock_find, mock_action): - x_obj = mock.Mock(id='12345678AB', status='ACTIVE', dependents={}) - mock_find.return_value = x_obj - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterDeleteRequest(identity='IDENTITY', force=False) - - result = self.svc.cluster_delete(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'IDENTITY') - mock_action.assert_called_once_with( - self.ctx, '12345678AB', 'CLUSTER_DELETE', - name='cluster_delete_12345678', - cluster_id='12345678AB', - cause=consts.CAUSE_RPC, - force=True, - status=am.Action.READY) - - notify.assert_called_once_with() - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_delete_with_containers(self, mock_find): - dependents = {'profiles': ['profile1']} - cluster = mock.Mock(id='cluster1', status='ACTIVE', - dependents=dependents) - mock_find.return_value = cluster - req = orco.ClusterDeleteRequest(identity='FAKE_CLUSTER', - force=False) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_delete, - self.ctx, req.obj_to_primitive()) - - msg = _("The cluster 'FAKE_CLUSTER' cannot be deleted: still " - "referenced by profile(s): ['profile1'].") - self.assertEqual(exc.ResourceInUse, ex.exc_info[0]) - self.assertEqual(msg, str(ex.exc_info[1])) - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_delete_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orco.ClusterDeleteRequest(identity='Bogus', - force=False) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_delete, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - - @mock.patch.object(co.Cluster, 'find') - def test_cluster_delete_improper_status(self, mock_find): - for bad_status in [consts.CS_CREATING, consts.CS_UPDATING, - consts.CS_DELETING, consts.CS_RECOVERING]: - fake_cluster = mock.Mock(id='12345678AB', status=bad_status) - mock_find.return_value = fake_cluster - req = orco.ClusterDeleteRequest(identity='BUSY', - force=False) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.cluster_delete, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ActionInProgress, ex.exc_info[0]) - self.assertEqual( - "The cluster 'BUSY' is in status %s." 
% bad_status, - str(ex.exc_info[1])) - - @mock.patch.object(am.Action, 'create') - @mock.patch.object(ro.Receiver, 'get_all') - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_cluster_delete_force(self, notify, mock_find, mock_policies, - mock_receivers, mock_action): - for bad_status in [consts.CS_CREATING, consts.CS_UPDATING, - consts.CS_DELETING, consts.CS_RECOVERING]: - x_obj = mock.Mock(id='12345678AB', status=bad_status, - dependents={}) - mock_find.return_value = x_obj - mock_policies.return_value = [] - mock_receivers.return_value = [] - mock_action.return_value = 'ACTION_ID' - req = orco.ClusterDeleteRequest(identity='IDENTITY', force=True) - - result = self.svc.cluster_delete(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_with(self.ctx, 'IDENTITY') - mock_policies.assert_not_called() - mock_receivers.assert_not_called() - mock_action.assert_called_with( - self.ctx, '12345678AB', 'CLUSTER_DELETE', - name='cluster_delete_12345678', - cluster_id='12345678AB', - cause=consts.CAUSE_RPC, - force=True, - status=am.Action.READY) - - notify.assert_called_with() - - @mock.patch.object(ca, 'CompleteLifecycleProc') - def test_cluster_complete_lifecycle(self, mock_lifecycle): - req = orco.ClusterCompleteLifecycleRequest( - identity='CLUSTER', lifecycle_action_token='NODE_ACTION_ID') - - # do it - res = self.svc.cluster_complete_lifecycle(self.ctx, - req.obj_to_primitive()) - - # verify - self.assertEqual({'action': 'NODE_ACTION_ID'}, res) - mock_lifecycle.assert_called_once_with(self.ctx, 'NODE_ACTION_ID') diff --git a/senlin/tests/unit/conductor/service/test_credentials.py b/senlin/tests/unit/conductor/service/test_credentials.py deleted file mode 100644 index ba32ae449..000000000 --- a/senlin/tests/unit/conductor/service/test_credentials.py +++ /dev/null @@ -1,101 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
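The CredentialTest cases below reduce to a small contract: credential_create persists the trust document keyed by the user and project taken from the request context, and credential_get returns the 'openstack' sub-document, or None when nothing (or nothing matching) is stored. A rough sketch of that contract follows, assuming a plain dict in place of the Credential table and a ctx object carrying user_id and project_id; none of these names are Senlin's own.

from collections import namedtuple

Ctx = namedtuple('Ctx', 'user_id project_id')

store = {}  # stand-in for the credential table, keyed by (user, project)


def credential_create(ctx, cred, attrs=None):
    store[(ctx.user_id, ctx.project_id)] = {
        'user': ctx.user_id,
        'project': ctx.project_id,
        'cred': cred,
    }
    return {'cred': cred}


def credential_get(ctx, query=None):
    found = store.get((ctx.user_id, ctx.project_id))
    if found is None:
        return None
    # Only the 'openstack' sub-document is returned; any other shape
    # (see test_credential_get_data_not_match) yields None.
    return found['cred'].get('openstack')


# Example mirroring test_credential_create / test_credential_get:
ctx = Ctx('fake_user_id', 'fake_project_id')
trust_id = 'c8602dc1-677b-45bc-b732-3bc0d86d9537'
credential_create(ctx, {'openstack': {'trust': trust_id}})
assert credential_get(ctx) == {'trust': trust_id}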
- -from unittest import mock - -from senlin.conductor import service -from senlin.objects import credential as co -from senlin.objects.requests import credentials as vorc -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class CredentialTest(base.SenlinTestCase): - def setUp(self): - super(CredentialTest, self).setUp() - self.ctx = utils.dummy_context(user_id='fake_user_id', - project='fake_project_id') - self.svc = service.ConductorService('host-a', 'topic-a') - - @mock.patch.object(co.Credential, 'update_or_create') - def test_credential_create(self, mock_create): - trust_id = 'c8602dc1-677b-45bc-b732-3bc0d86d9537' - cred = {'openstack': {'trust': trust_id}} - req = vorc.CredentialCreateRequest(cred=cred, - attrs={'k1': 'v1'}) - - result = self.svc.credential_create(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'cred': cred}, result) - mock_create.assert_called_once_with( - self.ctx, - { - 'user': 'fake_user_id', - 'project': 'fake_project_id', - 'cred': { - 'openstack': { - 'trust': trust_id - } - } - } - ) - - @mock.patch.object(co.Credential, 'get') - def test_credential_get(self, mock_get): - x_data = {'openstack': {'foo': 'bar'}} - x_cred = mock.Mock(cred=x_data) - mock_get.return_value = x_cred - req = vorc.CredentialGetRequest(user=self.ctx.user_id, - project=self.ctx.project_id, - query={'k1': 'v1'}) - - result = self.svc.credential_get(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar'}, result) - mock_get.assert_called_once_with( - self.ctx, u'fake_user_id', u'fake_project_id') - - @mock.patch.object(co.Credential, 'get') - def test_credential_get_not_found(self, mock_get): - mock_get.return_value = None - req = vorc.CredentialGetRequest(user=self.ctx.user_id, - project=self.ctx.project_id) - - result = self.svc.credential_get(self.ctx, req.obj_to_primitive()) - - self.assertIsNone(result) - mock_get.assert_called_once_with( - self.ctx, 'fake_user_id', 'fake_project_id') - - @mock.patch.object(co.Credential, 'get') - def test_credential_get_data_not_match(self, mock_get): - x_cred = mock.Mock(cred={'bogkey': 'bogval'}) - mock_get.return_value = x_cred - req = vorc.CredentialGetRequest(user=self.ctx.user_id, - project=self.ctx.project_id) - - result = self.svc.credential_get(self.ctx, req.obj_to_primitive()) - - self.assertIsNone(result) - mock_get.assert_called_once_with( - self.ctx, 'fake_user_id', 'fake_project_id') - - @mock.patch.object(co.Credential, 'update') - def test_credential_update(self, mock_update): - x_cred = 'fake_credential' - cred = {'openstack': {'trust': x_cred}} - req = vorc.CredentialUpdateRequest(cred=cred) - result = self.svc.credential_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'cred': cred}, result) - mock_update.assert_called_once_with( - self.ctx, 'fake_user_id', 'fake_project_id', {'cred': cred}) diff --git a/senlin/tests/unit/conductor/service/test_events.py b/senlin/tests/unit/conductor/service/test_events.py deleted file mode 100644 index 3d30a3924..000000000 --- a/senlin/tests/unit/conductor/service/test_events.py +++ /dev/null @@ -1,173 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_messaging.rpc import dispatcher as rpc - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.conductor import service -from senlin.objects import cluster as co -from senlin.objects import event as eo -from senlin.objects.requests import events as oreo -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class EventTest(base.SenlinTestCase): - def setUp(self): - super(EventTest, self).setUp() - self.ctx = utils.dummy_context(project='event_test_project') - self.svc = service.ConductorService('host-a', 'topic-a') - - @mock.patch.object(eo.Event, 'get_all') - def test_event_list(self, mock_load): - obj_1 = mock.Mock() - obj_1.as_dict.return_value = {'level': consts.EVENT_LEVELS['DEBUG']} - obj_2 = mock.Mock() - obj_2.as_dict.return_value = {'level': consts.EVENT_LEVELS['INFO']} - - mock_load.return_value = [obj_1, obj_2] - - req = oreo.EventListRequest() - result = self.svc.event_list(self.ctx, req.obj_to_primitive()) - expected = [{'level': 'DEBUG'}, {'level': 'INFO'}] - - self.assertEqual(expected, result) - mock_load.assert_called_once_with(self.ctx, project_safe=True) - - @mock.patch.object(eo.Event, 'get_all') - def test_event_list_with_params(self, mock_load): - obj_1 = mock.Mock() - obj_1.as_dict.return_value = {'level': consts.EVENT_LEVELS['DEBUG']} - obj_2 = mock.Mock() - obj_2.as_dict.return_value = {'level': consts.EVENT_LEVELS['INFO']} - - mock_load.return_value = [obj_1, obj_2] - - marker_uuid = '8216a86c-1bdc-442e-b493-329385d37cbc' - req = oreo.EventListRequest(level=['DEBUG', 'INFO'], - limit=123, - marker=marker_uuid, - sort=consts.EVENT_TIMESTAMP, - project_safe=True) - result = self.svc.event_list(self.ctx, req.obj_to_primitive()) - expected = [{'level': 'DEBUG'}, {'level': 'INFO'}] - self.assertEqual(expected, result) - - filters = {'level': [consts.EVENT_LEVELS['DEBUG'], - consts.EVENT_LEVELS['INFO']]} - mock_load.assert_called_once_with(self.ctx, filters=filters, - sort=consts.EVENT_TIMESTAMP, - limit=123, - marker=marker_uuid, - project_safe=True) - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(eo.Event, 'get_all') - def test_event_list_with_cluster_id(self, mock_load, mock_find): - obj_1 = mock.Mock() - obj_1.as_dict.return_value = {'level': consts.EVENT_LEVELS['DEBUG']} - obj_2 = mock.Mock() - obj_2.as_dict.return_value = {'level': consts.EVENT_LEVELS['INFO']} - mock_load.return_value = [obj_1, obj_2] - fake_clusters = [mock.Mock(id='FAKE1'), mock.Mock(id='FAKE2')] - mock_find.side_effect = fake_clusters - - req = oreo.EventListRequest(cluster_id=['CLUSTERA', 'CLUSTER2'], - project_safe=True) - - result = self.svc.event_list(self.ctx, req.obj_to_primitive()) - - expected = [{'level': 'DEBUG'}, {'level': 'INFO'}] - self.assertEqual(expected, result) - - filters = {'cluster_id': ['FAKE1', 'FAKE2']} - mock_load.assert_called_once_with(self.ctx, filters=filters, - project_safe=True) - mock_find.assert_has_calls([ - mock.call(self.ctx, 'CLUSTERA'), - mock.call(self.ctx, 'CLUSTER2') - ]) - - @mock.patch.object(co.Cluster, 'find') - 
@mock.patch.object(eo.Event, 'get_all') - def test_event_list_with_cluster_not_found(self, mock_load, mock_find): - mock_find.side_effect = [ - mock.Mock(id='FAKE1'), - exc.ResourceNotFound(type='cluster', id='CLUSTER2'), - ] - req = oreo.EventListRequest(cluster_id=['CLUSTERA', 'CLUSTER2'], - project_safe=True) - - result = self.svc.event_list(self.ctx, req.obj_to_primitive()) - - self.assertEqual([], result) - self.assertEqual(0, mock_load.call_count) - mock_find.assert_has_calls([ - mock.call(self.ctx, 'CLUSTERA'), - mock.call(self.ctx, 'CLUSTER2') - ]) - - def test_event_list_with_bad_params(self): - req = oreo.EventListRequest(project_safe=False) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.event_list, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.Forbidden, ex.exc_info[0]) - - @mock.patch.object(eo.Event, 'get_all') - def test_event_list_with_Auth(self, mock_load): - mock_load.return_value = [] - - req = oreo.EventListRequest(project_safe=True) - result = self.svc.event_list(self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_load.assert_called_once_with(self.ctx, project_safe=True) - - self.ctx.is_admin = True - - mock_load.reset_mock() - req = oreo.EventListRequest(project_safe=True) - result = self.svc.event_list(self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_load.assert_called_once_with(self.ctx, project_safe=True) - - mock_load.reset_mock() - req = oreo.EventListRequest(project_safe=False) - result = self.svc.event_list(self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_load.assert_called_once_with(self.ctx, project_safe=False) - - @mock.patch.object(eo.Event, 'find') - def test_event_get(self, mock_find): - x_event = mock.Mock() - x_event.as_dict.return_value = {'level': consts.EVENT_LEVELS['DEBUG']} - mock_find.return_value = x_event - - req = oreo.EventGetRequest(identity='EVENT_ID') - result = self.svc.event_get(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'level': 'DEBUG'}, result) - mock_find.assert_called_once_with(self.ctx, 'EVENT_ID') - - @mock.patch.object(eo.Event, 'find') - def test_event_get_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='event', id='BOGUS') - req = oreo.EventGetRequest(identity='BOGUS') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.event_get, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - mock_find.assert_called_once_with(self.ctx, 'BOGUS') diff --git a/senlin/tests/unit/conductor/service/test_nodes.py b/senlin/tests/unit/conductor/service/test_nodes.py deleted file mode 100644 index b1957b012..000000000 --- a/senlin/tests/unit/conductor/service/test_nodes.py +++ /dev/null @@ -1,1206 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
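Two behaviours in the event tests above deserve calling out: project_safe=False is an admin-only escape hatch (test_event_list_with_bad_params expects Forbidden for a regular user), and cluster identities are resolved to IDs before filtering, with an unknown cluster short-circuiting to an empty list rather than an error (test_event_list_with_cluster_not_found asserts the event table is never queried). A condensed sketch under those assumptions; Forbidden, NotFound, find_cluster and get_events are placeholders, not the Senlin classes.

class Forbidden(Exception):
    pass


class NotFound(Exception):
    pass


def event_list(ctx, req, find_cluster, get_events):
    project_safe = req.get('project_safe', True)
    # Only admins may disable project scoping.
    if not project_safe and not ctx.is_admin:
        raise Forbidden()
    filters = {}
    if 'cluster_id' in req:
        ids = []
        for name in req['cluster_id']:
            try:
                ids.append(find_cluster(ctx, name).id)
            except NotFound:
                # An unknown cluster cannot have events: return [] without
                # touching the event table (mock_load.call_count == 0).
                return []
        filters['cluster_id'] = ids
    events = get_events(ctx, filters=filters, project_safe=project_safe)
    return [e.as_dict() for e in events]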
- -from unittest import mock - -from oslo_config import cfg -from oslo_messaging.rpc import dispatcher as rpc - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.common import utils as common_utils -from senlin.conductor import service -from senlin.engine.actions import base as action_mod -from senlin.engine import dispatcher -from senlin.engine import environment -from senlin.engine import node as node_mod -from senlin.objects import cluster as co -from senlin.objects import node as no -from senlin.objects import profile as po -from senlin.objects.requests import nodes as orno -from senlin.profiles import base as pb -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class NodeTest(base.SenlinTestCase): - def setUp(self): - super(NodeTest, self).setUp() - self.ctx = utils.dummy_context(project='node_test_project') - self.svc = service.ConductorService('host-a', 'topic-a') - - @mock.patch.object(no.Node, 'get_all') - def test_node_list(self, mock_get): - obj_1 = mock.Mock() - obj_1.to_dict.return_value = {'k': 'v1'} - obj_2 = mock.Mock() - obj_2.to_dict.return_value = {'k': 'v2'} - mock_get.return_value = [obj_1, obj_2] - - req = orno.NodeListRequest() - result = self.svc.node_list(self.ctx, req.obj_to_primitive()) - - self.assertEqual([{'k': 'v1'}, {'k': 'v2'}], result) - mock_get.assert_called_once_with(self.ctx, project_safe=True) - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(no.Node, 'get_all') - def test_node_list_with_cluster_id(self, mock_get, mock_find): - obj_1 = mock.Mock() - obj_1.to_dict.return_value = {'k': 'v1'} - obj_2 = mock.Mock() - obj_2.to_dict.return_value = {'k': 'v2'} - mock_get.return_value = [obj_1, obj_2] - mock_find.return_value = mock.Mock(id='CLUSTER_ID') - - req = orno.NodeListRequest(cluster_id='MY_CLUSTER_NAME', - project_safe=True) - result = self.svc.node_list(self.ctx, req.obj_to_primitive()) - - self.assertEqual([{'k': 'v1'}, {'k': 'v2'}], result) - mock_find.assert_called_once_with(self.ctx, 'MY_CLUSTER_NAME') - mock_get.assert_called_once_with(self.ctx, cluster_id='CLUSTER_ID', - project_safe=True) - - @mock.patch.object(no.Node, 'get_all') - def test_node_list_with_params(self, mock_get): - obj_1 = mock.Mock() - obj_1.to_dict.return_value = {'k': 'v1'} - obj_2 = mock.Mock() - obj_2.to_dict.return_value = {'k': 'v2'} - mock_get.return_value = [obj_1, obj_2] - - MARKER_UUID = '2fd5b45f-bae4-4cdb-b283-a71e9f9805c7' - req = orno.NodeListRequest(status=['ACTIVE'], sort='status', - limit=123, marker=MARKER_UUID, - project_safe=True) - result = self.svc.node_list(self.ctx, req.obj_to_primitive()) - - self.assertEqual([{'k': 'v1'}, {'k': 'v2'}], result) - mock_get.assert_called_once_with(self.ctx, sort='status', limit=123, - marker=MARKER_UUID, project_safe=True, - filters={'status': ['ACTIVE']}) - - @mock.patch.object(co.Cluster, 'find') - def test_node_list_cluster_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='cluster', - id='BOGUS') - - req = orno.NodeListRequest(cluster_id='BOGUS', project_safe=True) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_list, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual('Cannot find the given cluster: BOGUS.', - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'BOGUS') - - @mock.patch.object(no.Node, 'get_all') - def test_node_list_with_project_safe(self, mock_get): 
- mock_get.return_value = [] - - req = orno.NodeListRequest(project_safe=True) - result = self.svc.node_list(self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_get.assert_called_once_with(self.ctx, project_safe=True) - mock_get.reset_mock() - - req = orno.NodeListRequest(project_safe=False) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_list, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.Forbidden, ex.exc_info[0]) - - self.ctx.is_admin = True - req = orno.NodeListRequest(project_safe=False) - result = self.svc.node_list(self.ctx, req.obj_to_primitive()) - self.assertEqual([], result) - mock_get.assert_called_once_with(self.ctx, project_safe=False) - mock_get.reset_mock() - - @mock.patch.object(no.Node, 'get_all') - def test_node_list_empty(self, mock_get): - mock_get.return_value = [] - - req = orno.NodeListRequest() - result = self.svc.node_list(self.ctx, req.obj_to_primitive()) - - self.assertEqual([], result) - mock_get.assert_called_once_with(self.ctx, project_safe=True) - - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(no.Node, 'create') - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_node_create(self, notify, mock_profile, mock_node, mock_action): - mock_profile.return_value = mock.Mock(id='PROFILE_ID') - x_node = mock.Mock(id='NODE_ID') - x_node.to_dict.return_value = {'foo': 'bar'} - mock_node.return_value = x_node - mock_action.return_value = 'ACTION_ID' - req = orno.NodeCreateRequestBody(name='NODE1', - profile_id='PROFILE_NAME') - - result = self.svc.node_create(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar', 'action': 'ACTION_ID'}, result) - mock_profile.assert_called_once_with(self.ctx, 'PROFILE_NAME') - mock_node.assert_called_once_with( - self.ctx, - { - 'name': 'NODE1', - 'profile_id': 'PROFILE_ID', - 'cluster_id': '', - 'index': -1, - 'role': '', - 'metadata': {}, - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - 'data': {}, - 'init_at': mock.ANY, - 'dependents': {}, - 'physical_id': None, - 'status': 'INIT', - 'status_reason': 'Initializing', - }) - mock_action.assert_called_once_with( - self.ctx, 'NODE_ID', consts.NODE_CREATE, - name='node_create_NODE_ID', - cluster_id='', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY) - notify.assert_called_once_with() - - @mock.patch.object(common_utils, 'format_node_name') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(no.Node, 'create') - @mock.patch.object(co.Cluster, 'get_next_index') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_node_create_same_profile(self, notify, mock_profile, - mock_cluster, mock_index, - mock_node, mock_action, - mock_node_name): - mock_profile.return_value = mock.Mock(id='PROFILE_ID', - type='PROFILE_TYPE') - x_cluster = mock.Mock(id='CLUSTER_ID', profile_id='PROFILE_ID', - config={}) - mock_cluster.return_value = x_cluster - mock_index.return_value = 12345 - x_node = mock.Mock(id='NODE_ID') - x_node.to_dict.return_value = {'foo': 'bar'} - mock_node.return_value = x_node - mock_action.return_value = 'ACTION_ID' - mock_node_name.return_value = "GENERATED_NAME" - req = orno.NodeCreateRequestBody(name='NODE1', - profile_id='PROFILE_NAME', - cluster_id='FAKE_CLUSTER') - - result = self.svc.node_create(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar', 'action': 
'ACTION_ID'}, result) - mock_cluster.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_profile.assert_called_once_with(self.ctx, 'PROFILE_NAME') - mock_index.assert_called_once_with(self.ctx, 'CLUSTER_ID') - mock_node.assert_called_once_with( - self.ctx, - { - 'name': 'GENERATED_NAME', - 'profile_id': 'PROFILE_ID', - 'cluster_id': 'CLUSTER_ID', - 'index': 12345, - 'role': '', - 'metadata': {}, - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - 'data': {}, - 'init_at': mock.ANY, - 'dependents': {}, - 'physical_id': None, - 'status': 'INIT', - 'status_reason': 'Initializing', - }) - mock_action.assert_called_once_with( - self.ctx, 'NODE_ID', consts.NODE_CREATE, - cluster_id='CLUSTER_ID', - name='node_create_NODE_ID', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY) - notify.assert_called_once_with() - - @mock.patch.object(common_utils, "format_node_name") - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(no.Node, 'create') - @mock.patch.object(co.Cluster, 'get_next_index') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(dispatcher, 'start_action') - def test_node_create_same_profile_type(self, notify, mock_profile, - mock_cluster, mock_index, - mock_node, mock_action, - mock_node_name): - mock_profile.side_effect = [ - mock.Mock(id='NODE_PROFILE_ID', type='PROFILE_TYPE'), - mock.Mock(id='CLUSTER_PROFILE_ID', type='PROFILE_TYPE'), - ] - x_cluster = mock.Mock(id='CLUSTER_ID', profile_id='CLUSTER_PROFILE_ID', - config={}) - mock_cluster.return_value = x_cluster - mock_index.return_value = 12345 - x_node = mock.Mock(id='NODE_ID') - x_node.to_dict.return_value = {'foo': 'bar'} - mock_node.return_value = x_node - mock_action.return_value = 'ACTION_ID' - mock_node_name.return_value = 'GENERATED_NAME' - req = orno.NodeCreateRequestBody(name='NODE1', - profile_id='PROFILE_NAME', - cluster_id='FAKE_CLUSTER') - - result = self.svc.node_create(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar', 'action': 'ACTION_ID'}, result) - mock_cluster.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'PROFILE_NAME'), # for node - mock.call(self.ctx, 'CLUSTER_PROFILE_ID'), # for cluster - ]) - mock_index.assert_called_once_with(self.ctx, 'CLUSTER_ID') - mock_node.assert_called_once_with( - self.ctx, - { - 'name': 'GENERATED_NAME', - 'profile_id': 'NODE_PROFILE_ID', - 'cluster_id': 'CLUSTER_ID', - 'physical_id': None, - 'index': 12345, - 'role': '', - 'metadata': {}, - 'status': 'INIT', - 'status_reason': 'Initializing', - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - 'data': {}, - 'dependents': {}, - 'init_at': mock.ANY, - }) - mock_action.assert_called_once_with( - self.ctx, 'NODE_ID', consts.NODE_CREATE, - name='node_create_NODE_ID', - cluster_id='CLUSTER_ID', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY) - notify.assert_called_once_with() - - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(no.Node, 'get_by_name') - def test_node_create_name_conflict(self, mock_find, mock_get): - cfg.CONF.set_override('name_unique', True) - mock_get.return_value = mock.Mock() - req = orno.NodeCreateRequestBody(name='NODE1', - profile_id='PROFILE_NAME') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_create, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual(_("The node named 
(NODE1) already exists."), - str(ex.exc_info[1])) - - @mock.patch.object(po.Profile, 'find') - def test_node_create_profile_not_found(self, mock_profile): - mock_profile.side_effect = exc.ResourceNotFound(type='profile', - id='Bogus') - req = orno.NodeCreateRequestBody(name='NODE1', - profile_id='Bogus') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_create, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The specified profile 'Bogus' could not be " - "found.", str(ex.exc_info[1])) - mock_profile.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Profile, 'find') - def test_node_create_cluster_not_found(self, mock_profile, mock_cluster): - mock_cluster.side_effect = exc.ResourceNotFound(type='cluster', - id='Bogus') - req = orno.NodeCreateRequestBody(name='NODE1', - profile_id='PROFILE_NAME', - cluster_id='Bogus') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_create, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("The specified cluster 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_cluster.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(po.Profile, 'find') - def test_node_create_profile_type_not_match(self, mock_profile, - mock_cluster): - mock_profile.side_effect = [ - mock.Mock(id='NODE_PROFILE_ID', type='TYPE-A'), - mock.Mock(id='CLUSTER_PROFILE_ID', type='TYPE-B'), - ] - mock_cluster.return_value = mock.Mock(id='CLUSTER_ID', - profile_id='CLUSTER_PROFILE_ID') - req = orno.NodeCreateRequestBody(name='NODE1', - profile_id='NODE_PROFILE', - cluster_id='FAKE_CLUSTER') - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_create, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("Node and cluster have different profile " - "type, operation aborted.", - str(ex.exc_info[1])) - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'NODE_PROFILE'), - mock.call(self.ctx, 'CLUSTER_PROFILE_ID'), - ]) - mock_cluster.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - - @mock.patch.object(no.Node, 'find') - def test_node_get(self, mock_find): - x_obj = mock.Mock(physical_id='PHYSICAL_ID') - x_obj.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_obj - req = orno.NodeGetRequest(identity='NODE1', show_details=False) - - result = self.svc.node_get(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar'}, result) - mock_find.assert_called_once_with(self.ctx, 'NODE1') - x_obj.to_dict.assert_called_once_with() - - @mock.patch.object(node_mod.Node, 'load') - @mock.patch.object(no.Node, 'find') - def test_node_get_with_details(self, mock_find, mock_load): - x_obj = mock.Mock(physical_id='PHYSICAL_ID') - x_obj.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_obj - x_node = mock.Mock() - x_node.get_details.return_value = {'info': 'blahblah'} - mock_load.return_value = x_node - - req = orno.NodeGetRequest(identity='NODE1', show_details=True) - result = self.svc.node_get(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar', 'details': {'info': 'blahblah'}}, - result) - mock_find.assert_called_once_with(self.ctx, 'NODE1') - mock_load.assert_called_once_with(self.ctx, db_node=x_obj) - x_obj.to_dict.assert_called_once_with() - x_node.get_details.assert_called_once_with(self.ctx) - - 
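The node_get cases around here fix the detail-loading rule: the driver is consulted only when show_details is requested and the node actually has a physical_id; otherwise the bare DB record is returned, and an unknown identity surfaces as ResourceNotFound. A small sketch of that flow, with find_node and load_engine_node standing in for no.Node.find and node_mod.Node.load:

def node_get(ctx, identity, show_details, find_node, load_engine_node):
    node = find_node(ctx, identity)  # raises ResourceNotFound for 'Bogus'
    data = node.to_dict()
    # Driver round-trip only when requested AND the node is backed by a
    # real resource; test_node_get_no_physical_id gets the bare dict.
    if show_details and node.physical_id:
        data['details'] = load_engine_node(ctx, db_node=node).get_details(ctx)
    return data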
@mock.patch.object(no.Node, 'find') - def test_node_get_node_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='node', id='Bogus') - req = orno.NodeGetRequest(identity='Bogus', show_details=False) - - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_get, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The node 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(no.Node, 'find') - def test_node_get_no_physical_id(self, mock_find): - x_obj = mock.Mock(physical_id=None) - x_obj.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_obj - req = orno.NodeGetRequest(identity='NODE1', show_details=True) - - result = self.svc.node_get(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar'}, result) - mock_find.assert_called_once_with(self.ctx, 'NODE1') - x_obj.to_dict.assert_called_once_with() - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(no.Node, 'find') - def test_node_update(self, mock_find, mock_action, mock_start): - x_obj = mock.Mock(id='FAKE_NODE_ID', name='NODE1', role='ROLE1', - cluster_id='FAKE_CLUSTER_ID', - metadata={'KEY': 'VALUE'}) - x_obj.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_obj - mock_action.return_value = 'ACTION_ID' - - req = orno.NodeUpdateRequest(identity='FAKE_NODE', - name='NODE2', - role='NEW_ROLE', - metadata={'foo1': 'bar1'}) - - # all properties changed except profile id - result = self.svc.node_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar', 'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE') - mock_action.assert_called_once_with( - self.ctx, 'FAKE_NODE_ID', consts.NODE_UPDATE, - name='node_update_FAKE_NOD', - cluster_id='FAKE_CLUSTER_ID', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY, - inputs={ - 'name': 'NODE2', - 'role': 'NEW_ROLE', - 'metadata': { - 'foo1': 'bar1', - } - }) - mock_start.assert_called_once_with() - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(po.Profile, 'find') - @mock.patch.object(no.Node, 'find') - # @mock.patch.object(co.Cluster, 'find') - def test_node_update_new_profile(self, mock_find, mock_profile, - mock_action, mock_start): - x_obj = mock.Mock(id='FAKE_NODE_ID', role='ROLE1', - cluster_id='FAKE_CLUSTER_ID', - metadata={'KEY': 'VALUE'}, - profile_id='OLD_PROFILE_ID') - x_obj.name = 'NODE1' - x_obj.to_dict.return_value = {'foo': 'bar'} - mock_find.return_value = x_obj - # Same profile type - mock_profile.side_effect = [ - mock.Mock(id='NEW_PROFILE_ID', type='PROFILE_TYPE'), - mock.Mock(id='OLD_PROFILE_ID', type='PROFILE_TYPE'), - ] - mock_action.return_value = 'ACTION_ID' - # all properties are filtered out except for profile_id - req = orno.NodeUpdateRequest(identity='FAKE_NODE', - name='NODE1', - role='ROLE1', - metadata={'KEY': 'VALUE'}, - profile_id='NEW_PROFILE') - result = self.svc.node_update(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'foo': 'bar', 'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE') - mock_profile.assert_has_calls([ - mock.call(self.ctx, 'NEW_PROFILE'), - mock.call(self.ctx, 'OLD_PROFILE_ID'), - ]) - mock_action.assert_called_once_with( - self.ctx, 'FAKE_NODE_ID', consts.NODE_UPDATE, - 
-            name='node_update_FAKE_NOD',
-            cluster_id='FAKE_CLUSTER_ID',
-            cause=consts.CAUSE_RPC,
-            status=action_mod.Action.READY,
-            inputs={
-                'new_profile_id': 'NEW_PROFILE_ID',
-            })
-        mock_start.assert_called_once_with()
-
-    @mock.patch.object(no.Node, 'find')
-    def test_node_update_node_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='node', id='Bogus')
-
-        req = orno.NodeUpdateRequest(identity='Bogus')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_update, self.ctx,
-                               req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The node 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(po.Profile, 'find')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_update_profile_not_found(self, mock_find, mock_profile):
-        mock_find.return_value = mock.Mock()
-        mock_profile.side_effect = exc.ResourceNotFound(type='profile',
-                                                        id='Bogus')
-
-        req = orno.NodeUpdateRequest(identity='FAKE_NODE',
-                                     profile_id='Bogus')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("The specified profile 'Bogus' could not be "
-                         "found.", str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_profile.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(po.Profile, 'find')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_update_diff_profile_type(self, mock_find, mock_profile):
-        mock_find.return_value = mock.Mock(profile_id='OLD_PROFILE_ID')
-        mock_profile.side_effect = [
-            mock.Mock(id='NEW_PROFILE_ID', type='NEW_PROFILE_TYPE'),
-            mock.Mock(id='OLD_PROFILE_ID', type='OLD_PROFILE_TYPE'),
-        ]
-
-        req = orno.NodeUpdateRequest(identity='FAKE_NODE',
-                                     profile_id='NEW_PROFILE')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("Cannot update a node to a different "
-                         "profile type, operation aborted.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_profile.assert_has_calls([
-            mock.call(self.ctx, 'NEW_PROFILE'),
-            mock.call(self.ctx, 'OLD_PROFILE_ID'),
-        ])
-
-    @mock.patch.object(po.Profile, 'find')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_update_duplicated_profile(self, mock_find, mock_profile):
-        mock_find.return_value = mock.Mock(profile_id='OLD_PROFILE_ID')
-        mock_profile.side_effect = [
-            mock.Mock(id='OLD_PROFILE_ID', type='PROFILE_TYPE'),
-            mock.Mock(id='OLD_PROFILE_ID', type='PROFILE_TYPE'),
-        ]
-
-        req = orno.NodeUpdateRequest(identity='FAKE_NODE',
-                                     profile_id='OLD_PROFILE_ID')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("No property needs an update.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_profile.assert_has_calls([
-            mock.call(self.ctx, 'OLD_PROFILE_ID'),
-            mock.call(self.ctx, 'OLD_PROFILE_ID'),
-        ])
-
-    @mock.patch.object(no.Node, 'find')
-    def test_node_update_no_property_for_update(self, mock_find):
-        x_obj = mock.Mock(id='FAKE_NODE_ID', name='NODE1', role='ROLE1',
-                          metadata={'KEY': 'VALUE'})
-        mock_find.return_value = x_obj
-
-        # no property has been specified for update
-        req =
orno.NodeUpdateRequest(identity='FAKE_NODE') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_update, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.BadRequest, ex.exc_info[0]) - self.assertEqual("No property needs an update.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(no.Node, 'find') - def test_node_delete(self, mock_find, mock_action, mock_start): - mock_find.return_value = mock.Mock(id='12345678AB', status='ACTIVE', - cluster_id='', - dependents={}) - mock_action.return_value = 'ACTION_ID' - - req = orno.NodeDeleteRequest(identity='FAKE_NODE', force=False) - result = self.svc.node_delete(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE') - mock_action.assert_called_once_with( - self.ctx, '12345678AB', consts.NODE_DELETE, - name='node_delete_12345678', - cluster_id='', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY) - mock_start.assert_called_once_with() - - @mock.patch.object(no.Node, 'find') - def test_node_delete_node_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='node', id='Bogus') - - req = orno.NodeDeleteRequest(identity='Bogus', force=False) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_delete, self.ctx, - req.obj_to_primitive()) - - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The node 'Bogus' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'Bogus') - - @mock.patch.object(no.Node, 'find') - def test_node_delete_improper_status(self, mock_find): - for bad_status in [consts.NS_CREATING, consts.NS_UPDATING, - consts.NS_DELETING, consts.NS_RECOVERING]: - fake_node = mock.Mock(id='12345678AB', status=bad_status) - mock_find.return_value = fake_node - req = orno.NodeDeleteRequest(identity='BUSY', force=False) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_delete, - self.ctx, req.obj_to_primitive()) - - self.assertEqual(exc.ActionInProgress, ex.exc_info[0]) - self.assertEqual("The node 'BUSY' is in status %s." 
% bad_status, - str(ex.exc_info[1])) - # skipping assertion on mock_find - - @mock.patch.object(no.Node, 'find') - def test_node_delete_have_dependency(self, mock_find): - dependents = {'nodes': ['NODE1']} - node = mock.Mock(id='NODE_ID', status='ACTIVE', dependents=dependents) - mock_find.return_value = node - req = orno.NodeDeleteRequest(identity='node1', force=False) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.node_delete, - self.ctx, req.obj_to_primitive()) - self.assertEqual(exc.ResourceInUse, ex.exc_info[0]) - self.assertEqual("The node 'node1' cannot be deleted: still depended " - "by other clusters and/or nodes.", - str(ex.exc_info[1])) - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(no.Node, 'find') - def test_node_delete_force(self, mock_find, mock_action, mock_start): - for bad_status in [consts.NS_CREATING, consts.NS_UPDATING, - consts.NS_DELETING, consts.NS_RECOVERING]: - - mock_find.return_value = mock.Mock(id='12345678AB', - status=bad_status, - dependents={}, - cluster_id='',) - mock_action.return_value = 'ACTION_ID' - - req = orno.NodeDeleteRequest(identity='FAKE_NODE', force=True) - result = self.svc.node_delete(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, result) - mock_find.assert_called_with(self.ctx, 'FAKE_NODE') - mock_action.assert_called_with( - self.ctx, '12345678AB', consts.NODE_DELETE, - name='node_delete_12345678', - cluster_id='', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY) - mock_start.assert_called_with() - - @mock.patch.object(environment.Environment, 'get_profile') - @mock.patch.object(pb.Profile, 'adopt_node') - def test_node_adopt_preview_with_profile(self, mock_adopt, mock_profile): - class FakeProfile(object): - pass - - req = mock.Mock( - identity="FAKE_NODE", - type="TestProfile-1.0", - overrides="foo", - snapshot=True - ) - mock_adopt.return_value = {'prop': 'value'} - mock_profile.return_value = FakeProfile - - c, s = self.svc._node_adopt_preview(self.ctx, req) - - req.obj_set_defaults.assert_called_once_with() - mock_profile.assert_called_once_with("TestProfile-1.0") - self.assertEqual(FakeProfile, c) - mock_adopt.assert_called_once_with( - self.ctx, mock.ANY, 'TestProfile-1.0', - overrides="foo", snapshot=True) - fake_node = mock_adopt.call_args[0][1] - self.assertIsInstance(fake_node, node_mod.Node) - self.assertEqual('adopt', fake_node.name) - self.assertEqual('TBD', fake_node.profile_id) - self.assertEqual('FAKE_NODE', fake_node.physical_id) - expected = { - 'type': 'TestProfile', - 'version': '1.0', - 'properties': {'prop': 'value'} - } - self.assertEqual(expected, s) - - @mock.patch.object(pb.Profile, 'adopt_node') - def test_node_adopt_preview_bad_type(self, mock_adopt): - req = mock.Mock( - identity="FAKE_NODE", - type="TestProfile-1.0", - overrides="foo", - snapshot=True - ) - - ex = self.assertRaises(exc.BadRequest, - self.svc._node_adopt_preview, - self.ctx, req) - - req.obj_set_defaults.assert_called_once_with() - self.assertEqual("The profile_type 'TestProfile-1.0' could not be " - "found.", str(ex)) - - @mock.patch.object(environment.Environment, 'get_profile') - @mock.patch.object(pb.Profile, 'adopt_node') - def test_node_adopt_preview_failed_adopt(self, mock_adopt, mock_profile): - class FakeProfile(object): - pass - - req = mock.Mock( - identity="FAKE_NODE", - type="TestProfile-1.0", - overrides="foo", - snapshot=True - ) - mock_profile.return_value = FakeProfile - mock_adopt.return_value = { 
-            'Error': {'code': 502, 'message': 'something is bad'}
-        }
-
-        ex = self.assertRaises(exc.ProfileOperationFailed,
-                               self.svc._node_adopt_preview,
-                               self.ctx, req)
-
-        req.obj_set_defaults.assert_called_once_with()
-        mock_profile.assert_called_once_with("TestProfile-1.0")
-        mock_adopt.assert_called_once_with(
-            self.ctx, mock.ANY, 'TestProfile-1.0',
-            overrides="foo", snapshot=True)
-
-        self.assertEqual('502: something is bad', str(ex))
-
-    @mock.patch.object(service.ConductorService, '_node_adopt_preview')
-    def test_node_adopt_preview(self, mock_preview):
-        spec = {'foo': 'bar'}
-        mock_preview.return_value = mock.Mock(), spec
-        req = orno.NodeAdoptPreviewRequest(identity='FAKE_ID',
-                                           type='FAKE_TYPE')
-
-        res = self.svc.node_adopt_preview(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'node_preview': {'foo': 'bar'}}, res)
-        mock_preview.assert_called_once_with(self.ctx, mock.ANY)
-        self.assertIsInstance(mock_preview.call_args[0][1],
-                              orno.NodeAdoptPreviewRequest)
-
-    @mock.patch.object(service.ConductorService, '_node_adopt_preview')
-    def test_node_adopt_preview_with_exception(self, mock_preview):
-        mock_preview.side_effect = exc.BadRequest(msg="boom")
-        req = orno.NodeAdoptPreviewRequest(identity='FAKE_ID',
-                                           type='FAKE_TYPE')
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_adopt_preview,
-                               self.ctx, req.obj_to_primitive())
-
-        mock_preview.assert_called_once_with(self.ctx, mock.ANY)
-        self.assertIsInstance(mock_preview.call_args[0][1],
-                              orno.NodeAdoptPreviewRequest)
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual('boom.', str(ex.exc_info[1]))
-
-    @mock.patch.object(no.Node, 'create')
-    @mock.patch.object(service.ConductorService, '_node_adopt_preview')
-    def test_node_adopt(self, mock_preview, mock_create):
-        class FakeProfile(object):
-
-            @classmethod
-            def create(cls, ctx, name, spec):
-                obj = mock.Mock(spec=spec, id='PROFILE_ID')
-                obj.name = name
-                return obj
-
-        req = orno.NodeAdoptRequest(identity='FAKE_ID', type='FAKE_TYPE')
-        mock_preview.return_value = FakeProfile, {'foo': 'bar'}
-        fake_node = mock.Mock()
-        fake_node.to_dict = mock.Mock(return_value={'attr': 'value'})
-        mock_create.return_value = fake_node
-
-        res = self.svc.node_adopt(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'attr': 'value'}, res)
-        mock_preview.assert_called_once_with(self.ctx, mock.ANY)
-        self.assertIsInstance(mock_preview.call_args[0][1],
-                              orno.NodeAdoptRequest)
-        attrs = {
-            'name': mock.ANY,
-            'data': {},
-            'dependents': {},
-            'profile_id': 'PROFILE_ID',
-            'physical_id': 'FAKE_ID',
-            'cluster_id': '',
-            'index': -1,
-            'role': '',
-            'metadata': {},
-            'status': consts.NS_ACTIVE,
-            'status_reason': 'Node adopted successfully',
-            'init_at': mock.ANY,
-            'created_at': mock.ANY,
-            'user': self.ctx.user_id,
-            'project': self.ctx.project_id,
-            'domain': self.ctx.domain_id
-        }
-        mock_create.assert_called_once_with(self.ctx, attrs)
-
-    @mock.patch.object(no.Node, 'get_by_name')
-    def test_node_adopt_name_not_unique(self, mock_get):
-        cfg.CONF.set_override('name_unique', True)
-        req = orno.NodeAdoptRequest(
-            name='FAKE_NAME', preview=False,
-            identity='FAKE_ID', type='FAKE_TYPE')
-        mock_get.return_value = mock.Mock()
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_adopt,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("The node named (FAKE_NAME) already exists.",
-                         str(ex.exc_info[1]))
-
-    @mock.patch.object(no.Node, 'create')
-    @mock.patch.object(service.ConductorService, '_node_adopt_preview')
-    def test_node_adopt_failed_preview(self, mock_preview, mock_create):
-        req = orno.NodeAdoptRequest(identity='FAKE_ID', type='FAKE_TYPE')
-        mock_preview.side_effect = exc.BadRequest(msg='boom')
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_adopt,
-                               self.ctx, req.obj_to_primitive())
-
-        mock_preview.assert_called_once_with(self.ctx, mock.ANY)
-        self.assertIsInstance(mock_preview.call_args[0][1],
-                              orno.NodeAdoptRequest)
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("boom.", str(ex.exc_info[1]))
-
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_check(self, mock_find, mock_action, mock_start):
-        mock_find.return_value = mock.Mock(id='12345678AB',
-                                           cluster_id='FAKE_CLUSTER_ID')
-        mock_action.return_value = 'ACTION_ID'
-
-        params = {'k1': 'v1'}
-        req = orno.NodeCheckRequest(identity='FAKE_NODE', params=params)
-        result = self.svc.node_check(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'action': 'ACTION_ID'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_action.assert_called_once_with(
-            self.ctx, '12345678AB', consts.NODE_CHECK,
-            name='node_check_12345678',
-            cluster_id='FAKE_CLUSTER_ID',
-            cause=consts.CAUSE_RPC,
-            status=action_mod.Action.READY,
-            inputs={'k1': 'v1'})
-        mock_start.assert_called_once_with()
-
-    @mock.patch.object(no.Node, 'find')
-    def test_node_check_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='node', id='Bogus')
-
-        req = orno.NodeCheckRequest(identity='Bogus')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_check,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The node 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_recover(self, mock_find, mock_action, mock_start):
-        mock_find.return_value = mock.Mock(
-            id='12345678AB', cluster_id='FAKE_CLUSTER_ID')
-        mock_action.return_value = 'ACTION_ID'
-
-        params = {'operation': 'REBOOT'}
-        req = orno.NodeRecoverRequest(identity='FAKE_NODE', params=params)
-        result = self.svc.node_recover(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'action': 'ACTION_ID'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_action.assert_called_once_with(
-            self.ctx, '12345678AB', consts.NODE_RECOVER,
-            name='node_recover_12345678',
-            cluster_id='FAKE_CLUSTER_ID',
-            cause=consts.CAUSE_RPC,
-            status=action_mod.Action.READY,
-            inputs={'operation': 'REBOOT'})
-        mock_start.assert_called_once_with()
-
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_recover_with_check(self, mock_find, mock_action, mock_start):
-        mock_find.return_value = mock.Mock(id='12345678AB', cluster_id='')
-        mock_action.return_value = 'ACTION_ID'
-
-        params = {'check': True, 'operation': 'REBUILD'}
-        req = orno.NodeRecoverRequest(identity='FAKE_NODE', params=params)
-        result = self.svc.node_recover(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'action': 'ACTION_ID'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_action.assert_called_once_with(
-            self.ctx, '12345678AB', consts.NODE_RECOVER,
-            name='node_recover_12345678',
-            cluster_id='',
-            cause=consts.CAUSE_RPC,
-            status=action_mod.Action.READY,
-            inputs={'check': True, 'operation': 'REBUILD'})
-        mock_start.assert_called_once_with()
-
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_recover_with_delete_timeout(self, mock_find, mock_action,
-                                              mock_start):
-        mock_find.return_value = mock.Mock(id='12345678AB', cluster_id='',)
-        mock_action.return_value = 'ACTION_ID'
-
-        params = {'delete_timeout': 20, 'operation': 'RECREATE'}
-        req = orno.NodeRecoverRequest(identity='FAKE_NODE', params=params)
-        result = self.svc.node_recover(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'action': 'ACTION_ID'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_action.assert_called_once_with(
-            self.ctx, '12345678AB', consts.NODE_RECOVER,
-            name='node_recover_12345678',
-            cluster_id='',
-            cause=consts.CAUSE_RPC,
-            status=action_mod.Action.READY,
-            inputs={'delete_timeout': 20,
-                    'operation': 'RECREATE'})
-        mock_start.assert_called_once_with()
-
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_recover_with_force_recreate(self, mock_find, mock_action,
-                                              mock_start):
-        mock_find.return_value = mock.Mock(
-            id='12345678AB', cluster_id='FAKE_CLUSTER_ID')
-        mock_action.return_value = 'ACTION_ID'
-
-        params = {'force_recreate': True, 'operation': 'reboot',
-                  'operation_params': {'type': 'soft'}}
-        req = orno.NodeRecoverRequest(identity='FAKE_NODE', params=params)
-        result = self.svc.node_recover(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'action': 'ACTION_ID'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_action.assert_called_once_with(
-            self.ctx, '12345678AB', consts.NODE_RECOVER,
-            name='node_recover_12345678',
-            cluster_id='FAKE_CLUSTER_ID',
-            cause=consts.CAUSE_RPC,
-            status=action_mod.Action.READY,
-            inputs={'force_recreate': True,
-                    'operation': 'reboot',
-                    'operation_params': {'type': 'soft'}})
-        mock_start.assert_called_once_with()
-
-    @mock.patch.object(no.Node, 'find')
-    def test_node_recover_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='node', id='Bogus')
-
-        req = orno.NodeRecoverRequest(identity='Bogus')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_recover,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The node 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_recover_unknown_operation(self, mock_find, mock_action):
-        mock_find.return_value = mock.Mock(id='12345678AB')
-        mock_action.return_value = 'ACTION_ID'
-        params = {'bogus': 'illegal'}
-        req = orno.NodeRecoverRequest(identity='FAKE_NODE', params=params)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_recover,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("Action parameter ['bogus'] is not recognizable.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        self.assertEqual(0, mock_action.call_count)
-
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_recover_invalid_operation(self, mock_find, mock_action):
-        mock_find.return_value = mock.Mock(id='12345678AB')
-        mock_action.return_value = 'ACTION_ID'
-        params = {'force_recreate': True, 'operation': 'blah',
-                  'operation_params': {'type': 'soft'}}
-        req = orno.NodeRecoverRequest(identity='FAKE_NODE', params=params)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_recover,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("Operation value 'blah' has to be one of the "
-                         "following: REBOOT, REBUILD, RECREATE.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        self.assertEqual(0, mock_action.call_count)
-
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_recover_invalid_operation_params(self, mock_find,
-                                                   mock_action):
-        mock_find.return_value = mock.Mock(id='12345678AB')
-        mock_action.return_value = 'ACTION_ID'
-        params = {'force_recreate': True, 'operation': 'REBOOT',
-                  'operation_params': {'type': 'blah'}}
-        req = orno.NodeRecoverRequest(identity='FAKE_NODE', params=params)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_recover,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("Type field 'blah' in operation_params has to be one "
-                         "of the following: SOFT, HARD.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        self.assertEqual(0, mock_action.call_count)
-
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(action_mod.Action, 'create')
-    @mock.patch.object(node_mod.Node, 'load')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_op(self, mock_find, mock_node, mock_action, mock_start):
-        x_db_node = mock.Mock(id='12345678AB', cluster_id='FAKE_CLUSTER_ID')
-        mock_find.return_value = x_db_node
-        x_schema = mock.Mock()
-        x_profile = mock.Mock(OPERATIONS={'dance': x_schema})
-        x_node = mock.Mock()
-        x_node.rt = {'profile': x_profile}
-        mock_node.return_value = x_node
-        mock_action.return_value = 'ACTION_ID'
-        params = {'style': 'tango'}
-        req = orno.NodeOperationRequest(identity='FAKE_NODE',
-                                        operation='dance',
-                                        params=params)
-
-        result = self.svc.node_op(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'action': 'ACTION_ID'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_NODE')
-        mock_node.assert_called_once_with(self.ctx, db_node=x_db_node)
-        x_schema.validate.assert_called_once_with({'style': 'tango'})
-        mock_action.assert_called_once_with(
-            self.ctx, '12345678AB', consts.NODE_OPERATION,
-            name='node_dance_12345678',
-            cluster_id='FAKE_CLUSTER_ID',
-            cause=consts.CAUSE_RPC,
-            status=action_mod.Action.READY,
-            inputs={'operation': 'dance', 'params': {'style': 'tango'}})
-        mock_start.assert_called_once_with()
-
-    @mock.patch.object(no.Node, 'find')
-    def test_node_op_node_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='node', id='Bogus')
-
-        req = orno.NodeOperationRequest(identity='Bogus', operation='dance',
-                                        params={})
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_op,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The node 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(node_mod.Node, 'load')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_op_unsupported_operation(self, mock_find, mock_node):
-        x_db_node = mock.Mock(id='12345678AB')
-        mock_find.return_value = x_db_node
-        x_schema = mock.Mock()
-        x_profile = mock.Mock(OPERATIONS={'dance': x_schema}, type='cow')
-        x_node = mock.Mock()
-        x_node.rt = {'profile': x_profile}
-        mock_node.return_value = x_node
-
-        req = orno.NodeOperationRequest(identity='node1', operation='swim',
-                                        params={})
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_op,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("The requested operation 'swim' is not "
-                         "supported by the profile type 'cow'.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'node1')
-        mock_node.assert_called_once_with(self.ctx, db_node=x_db_node)
-
-    @mock.patch.object(node_mod.Node, 'load')
-    @mock.patch.object(no.Node, 'find')
-    def test_node_op_bad_parameters(self, mock_find, mock_node):
-        x_db_node = mock.Mock(id='12345678AB')
-        mock_find.return_value = x_db_node
-        x_schema = mock.Mock()
-        x_schema.validate.side_effect = exc.ESchema(message='Boom')
-        x_profile = mock.Mock(OPERATIONS={'dance': x_schema})
-        x_node = mock.Mock()
-        x_node.rt = {'profile': x_profile}
-        mock_node.return_value = x_node
-
-        req = orno.NodeOperationRequest(identity='node1', operation='dance',
-                                        params={'style': 'tango'})
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.node_op,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("Boom.", str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'node1')
-        mock_node.assert_called_once_with(self.ctx, db_node=x_db_node)
-        x_schema.validate.assert_called_once_with({'style': 'tango'})
diff --git a/senlin/tests/unit/conductor/service/test_policies.py b/senlin/tests/unit/conductor/service/test_policies.py
deleted file mode 100644
index 5019a14c0..000000000
--- a/senlin/tests/unit/conductor/service/test_policies.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_messaging.rpc import dispatcher as rpc
-from oslo_utils import uuidutils
-
-from senlin.common import exception as exc
-from senlin.common.i18n import _
-from senlin.conductor import service
-from senlin.engine import environment
-from senlin.objects import policy as po
-from senlin.objects.requests import policies as orpo
-from senlin.policies import base as pb
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-from senlin.tests.unit import fakes
-
-
-class PolicyTest(base.SenlinTestCase):
-    def setUp(self):
-        super(PolicyTest, self).setUp()
-        self.ctx = utils.dummy_context(project='policy_test_project')
-        self.svc = service.ConductorService('host-a', 'topic-a')
-
-    def _setup_fakes(self):
-        """Set up fake policy for the purpose of testing.
-
-        This method is provided in a standalone function because not all
-        test cases need such a set up.
-        """
-        environment.global_env().register_policy('TestPolicy-1.0',
-                                                 fakes.TestPolicy)
-        self.spec = {
-            'type': 'TestPolicy',
-            'version': '1.0',
-            'properties': {
-                'KEY2': 6
-            }
-        }
-
-    @mock.patch.object(po.Policy, 'get_all')
-    def test_policy_list(self, mock_get):
-        x_obj_1 = mock.Mock()
-        x_obj_1.to_dict.return_value = {'k': 'v1'}
-        x_obj_2 = mock.Mock()
-        x_obj_2.to_dict.return_value = {'k': 'v2'}
-        mock_get.return_value = [x_obj_1, x_obj_2]
-        req = orpo.PolicyListRequest(project_safe=True)
-
-        result = self.svc.policy_list(self.ctx, req.obj_to_primitive())
-        self.assertEqual([{'k': 'v1'}, {'k': 'v2'}], result)
-        mock_get.assert_called_once_with(self.ctx, project_safe=True)
-
-    @mock.patch.object(po.Policy, 'get_all')
-    def test_policy_list_with_params(self, mock_get):
-        mock_get.return_value = []
-        marker = uuidutils.generate_uuid()
-        params = {
-            'limit': 10,
-            'marker': marker,
-            'name': ['test-policy'],
-            'type': ['senlin.policy.scaling-1.0'],
-            'sort': 'name:asc',
-            'project_safe': True
-        }
-        req = orpo.PolicyListRequest(**params)
-
-        result = self.svc.policy_list(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual([], result)
-        mock_get.assert_called_once_with(
-            self.ctx, limit=10, marker=marker, sort='name:asc',
-            filters={'name': ['test-policy'],
-                     'type': ['senlin.policy.scaling-1.0']},
-            project_safe=True)
-
-    def test_policy_create_default(self):
-        self._setup_fakes()
-        req = orpo.PolicyCreateRequestBody(name='Fake', spec=self.spec)
-
-        result = self.svc.policy_create(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual('Fake', result['name'])
-        self.assertEqual('TestPolicy-1.0', result['type'])
-        self.assertIsNone(result['updated_at'])
-        self.assertIsNotNone(result['created_at'])
-        self.assertIsNotNone(result['id'])
-
-    @mock.patch.object(po.Policy, 'get_by_name')
-    def test_policy_create_name_conflict(self, mock_get):
-        cfg.CONF.set_override('name_unique', True)
-        mock_get.return_value = mock.Mock()
-
-        spec = {
-            'type': 'FakePolicy',
-            'version': '1.0',
-            'properties': {
-                'KEY2': 6
-            }
-        }
-
-        req = orpo.PolicyCreateRequestBody(name='FAKE_NAME', spec=spec)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_create,
-                               self.ctx, req.obj_to_primitive())
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("A policy named 'FAKE_NAME' already exists.",
-                         str(ex.exc_info[1]))
-        mock_get.assert_called_once_with(self.ctx, 'FAKE_NAME')
-
-    def test_policy_create_type_not_found(self):
-        # We skip the fakes setup, so we won't get the proper policy type
-        spec = {
-            'type': 'FakePolicy',
-            'version': '1.0',
-            'properties': {
-                'KEY2': 6
-            }
-        }
-
-        req = orpo.PolicyCreateRequestBody(name='Fake', spec=spec)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_create,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The policy_type 'FakePolicy-1.0' could "
-                         "not be found.",
-                         str(ex.exc_info[1]))
-
-    def test_policy_create_invalid_spec(self):
-        # This test is for the policy object constructor which may throw
-        # exceptions if the spec is invalid
-        self._setup_fakes()
-        spec = copy.deepcopy(self.spec)
-        spec['properties'] = {'KEY3': 'value3'}
-
-        req = orpo.PolicyCreateRequestBody(name='Fake', spec=spec)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_create,
-                               self.ctx, req.obj_to_primitive())
-        self.assertEqual(exc.ESchema, ex.exc_info[0])
-        self.assertEqual("Required spec item 'KEY2' not provided",
-                         str(ex.exc_info[1]))
-
-    def test_policy_create_invalid_value(self):
-        self._setup_fakes()
-        spec = copy.deepcopy(self.spec)
-        spec['properties']['KEY2'] = 'value3'
-
-        mock_validate = self.patchobject(fakes.TestPolicy, 'validate')
-        mock_validate.side_effect = exc.InvalidSpec(
-            message="The specified KEY2 'value3' could not be found.")
-
-        req = orpo.PolicyCreateRequestBody(name='Fake', spec=spec)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_create,
-                               self.ctx, req.obj_to_primitive())
-        self.assertEqual(exc.InvalidSpec, ex.exc_info[0])
-        self.assertEqual("The specified KEY2 'value3' could not be "
-                         "found.", str(ex.exc_info[1]))
-
-    def test_policy_create_failed_validation(self):
-        self._setup_fakes()
-
-        mock_validate = self.patchobject(fakes.TestPolicy, 'validate')
-        mock_validate.side_effect = exc.InvalidSpec(message='BOOM')
-
-        req = orpo.PolicyCreateRequestBody(name='Fake', spec=self.spec)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_create,
-                               self.ctx, req.obj_to_primitive())
-        self.assertEqual(exc.InvalidSpec, ex.exc_info[0])
-        self.assertEqual('BOOM', str(ex.exc_info[1]))
-
-    def test_policy_validate_pass(self):
-        self._setup_fakes()
-
-        expected_resp = {
-            'created_at': None,
-            'domain': '',
-            'id': None,
-            'data': {},
-            'name': 'validated_policy',
-            'project': 'policy_test_project',
-            'type': 'TestPolicy-1.0',
-            'updated_at': None,
-            'user': 'test_user_id',
-            'spec': {
-                'type': 'TestPolicy',
-                'version': '1.0',
-                'properties': {
-                    'KEY2': 6
-                }
-            }
-        }
-
-        body = orpo.PolicyValidateRequestBody(spec=self.spec)
-
-        resp = self.svc.policy_validate(self.ctx, body.obj_to_primitive())
-        self.assertEqual(expected_resp, resp)
-
-    def test_policy_validate_failed(self):
-        self._setup_fakes()
-        mock_validate = self.patchobject(fakes.TestPolicy, 'validate')
-        mock_validate.side_effect = exc.InvalidSpec(message='BOOM')
-
-        body = orpo.PolicyValidateRequestBody(spec=self.spec)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_validate,
-                               self.ctx, body.obj_to_primitive())
-        self.assertEqual(exc.InvalidSpec, ex.exc_info[0])
-        self.assertEqual('BOOM',
-                         str(ex.exc_info[1]))
-
-    @mock.patch.object(po.Policy, 'find')
-    def test_policy_get(self, mock_find):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_obj.to_dict.return_value = {'foo': 'bar'}
-        req = orpo.PolicyGetRequest(identity='FAKE_POLICY')
-
-        result = self.svc.policy_get(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'foo': 'bar'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_POLICY')
-
-    @mock.patch.object(po.Policy, 'find')
-    def test_policy_get_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='policy',
-                                                     id='Fake')
-        req = orpo.PolicyGetRequest(identity='POLICY')
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_get,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-
-    @mock.patch.object(pb.Policy, 'load')
-    @mock.patch.object(po.Policy, 'find')
-    def test_policy_update(self, mock_find, mock_load):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_policy = mock.Mock()
-        x_policy.name = 'OLD_NAME'
-        x_policy.to_dict.return_value = {'foo': 'bar'}
-        mock_load.return_value = x_policy
-        p_req = orpo.PolicyUpdateRequestBody(name='NEW_NAME')
-        request = {
-            'identity': 'FAKE',
-            'policy': p_req
-        }
-
-        req = orpo.PolicyUpdateRequest(**request)
-
-        result = self.svc.policy_update(self.ctx, req.obj_to_primitive())
-        self.assertEqual({'foo': 'bar'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE')
-        mock_load.assert_called_once_with(self.ctx, db_policy=x_obj)
-
-    @mock.patch.object(po.Policy, 'find')
-    def test_policy_update_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='policy',
-                                                     id='Fake')
-        p_req = orpo.PolicyUpdateRequestBody(name='NEW_NAME')
-        request = {
-            'identity': 'Fake',
-            'policy': p_req
-        }
-
-        req = orpo.PolicyUpdateRequest(**request)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-
-    @mock.patch.object(pb.Policy, 'load')
-    @mock.patch.object(po.Policy, 'find')
-    def test_policy_update_no_change(self, mock_find, mock_load):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_policy = mock.Mock()
-        x_policy.name = 'OLD_NAME'
-        x_policy.to_dict.return_value = {'foo': 'bar'}
-        mock_load.return_value = x_policy
-        body = {
-            'name': 'OLD_NAME',
-        }
-        p_req = orpo.PolicyUpdateRequestBody(**body)
-        request = {
-            'identity': 'FAKE',
-            'policy': p_req
-        }
-
-        req = orpo.PolicyUpdateRequest(**request)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual('No property needs an update.',
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'FAKE')
-        mock_load.assert_called_once_with(self.ctx, db_policy=x_obj)
-        self.assertEqual(0, x_policy.store.call_count)
-        self.assertEqual('OLD_NAME', x_policy.name)
-
-    @mock.patch.object(pb.Policy, 'delete')
-    @mock.patch.object(po.Policy, 'find')
-    def test_policy_delete(self, mock_find, mock_delete):
-        x_obj = mock.Mock(id='POLICY_ID')
-        mock_find.return_value = x_obj
-        mock_delete.return_value = None
-
-        req = orpo.PolicyDeleteRequest(identity='POLICY_ID')
-        result = self.svc.policy_delete(self.ctx, req.obj_to_primitive())
-
-        self.assertIsNone(result)
-        self.assertEqual('POLICY_ID', req.identity)
-        mock_find.assert_called_once_with(self.ctx, 'POLICY_ID')
-        mock_delete.assert_called_once_with(self.ctx, 'POLICY_ID')
-
-    @mock.patch.object(po.Policy, 'find')
-    def test_policy_delete_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='policy', id='Bogus')
-
-        req = orpo.PolicyDeleteRequest(identity='Bogus')
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_delete, self.ctx,
-                               req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The policy 'Bogus' could not be found.",
                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(pb.Policy, 'delete')
-    @mock.patch.object(po.Policy, 'find')
-    def test_policy_delete_policy_in_use(self, mock_find, mock_delete):
-        x_obj = mock.Mock(id='POLICY_ID')
-        mock_find.return_value = x_obj
-        err = exc.EResourceBusy(type='policy', id='POLICY_ID')
-        mock_delete.side_effect = err
-
-        req = orpo.PolicyDeleteRequest(identity='POLICY_ID')
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_delete, self.ctx,
-                               req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceInUse, ex.exc_info[0])
-        self.assertEqual(_("The policy 'POLICY_ID' cannot be deleted: "
-                           "still attached to some clusters."),
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'POLICY_ID')
-        mock_delete.assert_called_once_with(self.ctx, 'POLICY_ID')
diff --git a/senlin/tests/unit/conductor/service/test_policy_types.py b/senlin/tests/unit/conductor/service/test_policy_types.py
deleted file mode 100644
index c1a721ade..000000000
--- a/senlin/tests/unit/conductor/service/test_policy_types.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_messaging.rpc import dispatcher as rpc
-
-from senlin.common import exception as exc
-from senlin.conductor import service
-from senlin.engine import environment
-from senlin.objects.requests import policy_type as orpt
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-class PolicyTypeTest(base.SenlinTestCase):
-    def setUp(self):
-        super(PolicyTypeTest, self).setUp()
-        self.ctx = utils.dummy_context(project='policy_type_test_project')
-        self.svc = service.ConductorService('host-a', 'topic-a')
-
-    @mock.patch.object(environment, 'global_env')
-    def test_policy_type_list(self, mock_env):
-        x_env = mock.Mock()
-        x_env.get_policy_types.return_value = [{'foo': 'bar'}]
-        mock_env.return_value = x_env
-
-        req = orpt.PolicyTypeListRequest()
-        types = self.svc.policy_type_list(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual([{'foo': 'bar'}], types)
-        mock_env.assert_called_once_with()
-        x_env.get_policy_types.assert_called_once_with()
-
-    @mock.patch.object(environment, 'global_env')
-    def test_policy_type_get(self, mock_env):
-        x_env = mock.Mock()
-        x_policy_type = mock.Mock()
-        x_policy_type.get_schema.return_value = {'foo': 'bar'}
-        x_policy_type.VERSIONS = {'1.0': [{'status': 'supported',
-                                           'since': '2016.04'}]}
-        x_env.get_policy.return_value = x_policy_type
-        mock_env.return_value = x_env
-
-        req = orpt.PolicyTypeGetRequest(type_name='FAKE_TYPE')
-        result = self.svc.policy_type_get(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(
-            {
-                'name': 'FAKE_TYPE',
-                'schema': {'foo': 'bar'},
-                'support_status': {'1.0': [{'status': 'supported',
-                                            'since': '2016.04'}]}
-            },
-            result)
-        mock_env.assert_called_once_with()
-        x_env.get_policy.assert_called_once_with('FAKE_TYPE')
-        x_policy_type.get_schema.assert_called_once_with()
-
-    @mock.patch.object(environment, 'global_env')
-    def test_policy_type_get_nonexist(self, mock_env):
-        x_env = mock.Mock()
-        err = exc.ResourceNotFound(type='policy_type', id='FAKE_TYPE')
-        x_env.get_policy.side_effect = err
-        mock_env.return_value = x_env
-
-        req = orpt.PolicyTypeGetRequest(type_name='FAKE_TYPE')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.policy_type_get,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The policy_type 'FAKE_TYPE' could not be "
-                         "found.", str(ex.exc_info[1]))
-        mock_env.assert_called_once_with()
-        x_env.get_policy.assert_called_once_with('FAKE_TYPE')
diff --git a/senlin/tests/unit/conductor/service/test_profile_types.py b/senlin/tests/unit/conductor/service/test_profile_types.py
deleted file mode 100644
index f6aaf4ded..000000000
--- a/senlin/tests/unit/conductor/service/test_profile_types.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_messaging.rpc import dispatcher as rpc
-
-from senlin.common import exception as exc
-from senlin.conductor import service
-from senlin.engine import environment
-from senlin.objects.requests import profile_type as vorp
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-class ProfileTypeTest(base.SenlinTestCase):
-    def setUp(self):
-        super(ProfileTypeTest, self).setUp()
-        self.ctx = utils.dummy_context(project='profile_type_test_project')
-        self.svc = service.ConductorService('host-a', 'topic-a')
-
-    @mock.patch.object(environment, 'global_env')
-    def test_profile_type_list(self, mock_env):
-        x_env = mock.Mock()
-        x_env.get_profile_types.return_value = [{'foo': 'bar'}]
-        mock_env.return_value = x_env
-
-        req = vorp.ProfileTypeListRequest()
-        types = self.svc.profile_type_list(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual([{'foo': 'bar'}], types)
-        mock_env.assert_called_once_with()
-        x_env.get_profile_types.assert_called_once_with()
-
-    @mock.patch.object(environment, 'global_env')
-    def test_profile_type_get(self, mock_env):
-        x_env = mock.Mock()
-        x_profile_type = mock.Mock()
-        x_profile_type.get_schema.return_value = {'foo': 'bar'}
-        x_profile_type.VERSIONS = {'1.0': [{'status': 'supported',
-                                            'since': '2016.04'}]}
-        x_env.get_profile.return_value = x_profile_type
-        mock_env.return_value = x_env
-
-        req = vorp.ProfileTypeGetRequest(type_name='FAKE_TYPE')
-        result = self.svc.profile_type_get(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(
-            {
-                'name': 'FAKE_TYPE',
-                'schema': {'foo': 'bar'},
-                'support_status': {'1.0': [{'status': 'supported',
-                                            'since': '2016.04'}]}
-            },
-            result)
-        mock_env.assert_called_once_with()
-        x_env.get_profile.assert_called_once_with('FAKE_TYPE')
-        x_profile_type.get_schema.assert_called_once_with()
-
-    @mock.patch.object(environment, 'global_env')
-    def test_profile_type_get_nonexist(self, mock_env):
-        x_env = mock.Mock()
-        err = exc.ResourceNotFound(type='profile_type', id='FAKE_TYPE')
-        x_env.get_profile.side_effect = err
-        mock_env.return_value = x_env
-
-        req = vorp.ProfileTypeGetRequest(type_name='FAKE_TYPE')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_type_get,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The profile_type 'FAKE_TYPE' could not be "
-                         "found.", str(ex.exc_info[1]))
-        mock_env.assert_called_once_with()
-        x_env.get_profile.assert_called_once_with('FAKE_TYPE')
-
-    @mock.patch.object(environment, 'global_env')
-    def test_profile_type_ops(self, mock_env):
-        x_env = mock.Mock()
-        x_profile_type = mock.Mock()
-        x_profile_type.get_ops.return_value = {'foo': 'bar'}
-        x_env.get_profile.return_value = x_profile_type
-        mock_env.return_value = x_env
-
-        req = vorp.ProfileTypeOpListRequest(type_name='FAKE_TYPE')
-        ops = self.svc.profile_type_ops(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'operations': {'foo': 'bar'}}, ops)
-        mock_env.assert_called_once_with()
-        x_env.get_profile.assert_called_once_with('FAKE_TYPE')
-        x_profile_type.get_ops.assert_called_once_with()
-
-    @mock.patch.object(environment, 'global_env')
-    def test_profile_type_ops_not_found(self, mock_env):
-        x_env = mock.Mock()
-        err = exc.ResourceNotFound(type='profile_type', id='FAKE_TYPE')
-        x_env.get_profile.side_effect = err
-        mock_env.return_value = x_env
-        req = vorp.ProfileTypeOpListRequest(type_name='FAKE_TYPE')
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_type_ops,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("The profile_type 'FAKE_TYPE' could not be found.",
-                         str(ex.exc_info[1]))
-
-        mock_env.assert_called_once_with()
-        x_env.get_profile.assert_called_once_with('FAKE_TYPE')
diff --git a/senlin/tests/unit/conductor/service/test_profiles.py b/senlin/tests/unit/conductor/service/test_profiles.py
deleted file mode 100644
index ef29dc7bd..000000000
--- a/senlin/tests/unit/conductor/service/test_profiles.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_messaging.rpc import dispatcher as rpc
-from oslo_utils import uuidutils
-
-from senlin.common import exception as exc
-from senlin.conductor import service
-from senlin.engine import environment
-from senlin.objects import profile as po
-from senlin.objects.requests import profiles as vorp
-from senlin.profiles import base as pb
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-from senlin.tests.unit import fakes
-
-
-class ProfileTest(base.SenlinTestCase):
-    def setUp(self):
-        super(ProfileTest, self).setUp()
-        self.ctx = utils.dummy_context(project='profile_test_project')
-        self.svc = service.ConductorService('host-a', 'topic-a')
-
-    def _setup_fakes(self):
-        """Set up fake profile for the purpose of testing.
-
-        This method is provided in a standalone function because not all
-        test cases need such a set up.
-        """
-        environment.global_env().register_profile('TestProfile-1.0',
-                                                  fakes.TestProfile)
-        self.spec = {
-            'type': 'TestProfile',
-            'version': '1.0',
-            'properties': {
-                'INT': 1,
-                'STR': 'str',
-                'LIST': ['v1', 'v2'],
-                'MAP': {'KEY1': 1, 'KEY2': 'v2'},
-            }
-        }
-
-    @mock.patch.object(po.Profile, 'get_all')
-    def test_profile_list(self, mock_get):
-        x_obj_1 = mock.Mock()
-        x_obj_1.to_dict.return_value = {'k': 'v1'}
-        x_obj_2 = mock.Mock()
-        x_obj_2.to_dict.return_value = {'k': 'v2'}
-        mock_get.return_value = [x_obj_1, x_obj_2]
-        req = vorp.ProfileListRequest(project_safe=True)
-
-        result = self.svc.profile_list(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual([{'k': 'v1'}, {'k': 'v2'}], result)
-        mock_get.assert_called_once_with(self.ctx, project_safe=True)
-
-    @mock.patch.object(po.Profile, 'get_all')
-    def test_profile_list_with_params(self, mock_get):
-        mock_get.return_value = []
-        marker = uuidutils.generate_uuid()
-        params = {
-            'limit': 10,
-            'marker': marker,
-            'name': ['foo'],
-            'type': ['os.nova.server'],
-            'sort': 'name:asc',
-            'project_safe': True
-        }
-        req = vorp.ProfileListRequest(**params)
-
-        result = self.svc.profile_list(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual([], result)
-        mock_get.assert_called_once_with(self.ctx, limit=10, marker=marker,
-                                         filters={'name': ['foo'],
-                                                  'type': ['os.nova.server']},
-                                         sort='name:asc',
-                                         project_safe=True)
-
-    @mock.patch.object(pb.Profile, 'create')
-    def test_profile_create_default(self, mock_create):
-        x_profile = mock.Mock()
-        x_profile.to_dict.return_value = {'foo': 'bar'}
-        mock_create.return_value = x_profile
-        self._setup_fakes()
-        body = vorp.ProfileCreateRequestBody(name='p-1', spec=self.spec,
-                                             metadata={'foo': 'bar'})
-        req = vorp.ProfileCreateRequest(profile=body)
-
-        result = self.svc.profile_create(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'foo': 'bar'}, result)
-
-    @mock.patch.object(po.Profile, 'get_by_name')
-    def test_profile_create_name_conflict(self, mock_get):
-        cfg.CONF.set_override('name_unique', True)
-        mock_get.return_value = mock.Mock()
-
-        spec = {
-            'type': 'FakeProfile',
-            'version': '1.0',
-            'properties': {
-                'LIST': ['A', 'B'],
-                'MAP': {'KEY1': 11, 'KEY2': 12},
-            }
-        }
-
-        body = vorp.ProfileCreateRequestBody(name='FAKE_NAME', spec=spec)
-        req = vorp.ProfileCreateRequest(profile=body)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_create,
-                               self.ctx, req.obj_to_primitive())
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("A profile named 'FAKE_NAME' already exists.",
-                         str(ex.exc_info[1]))
-        mock_get.assert_called_once_with(self.ctx, 'FAKE_NAME')
-
-    @mock.patch.object(pb.Profile, 'create')
-    def test_profile_create_type_not_found(self, mock_create):
-        self._setup_fakes()
-        spec = copy.deepcopy(self.spec)
-        spec['type'] = 'Bogus'
-        body = vorp.ProfileCreateRequestBody(name='foo', spec=spec)
-        req = vorp.ProfileCreateRequest(profile=body)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_create,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The profile_type 'Bogus-1.0' could not be "
-                         "found.", str(ex.exc_info[1]))
-
-    @mock.patch.object(pb.Profile, 'create')
-    def test_profile_create_invalid_spec(self, mock_create):
-        self._setup_fakes()
-        mock_create.side_effect = exc.InvalidSpec(message="badbad")
-        body = vorp.ProfileCreateRequestBody(name='foo', spec=self.spec)
-        req = vorp.ProfileCreateRequest(profile=body)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_create,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.InvalidSpec, ex.exc_info[0])
-        self.assertEqual("badbad", str(ex.exc_info[1]))
-
-    def test_profile_validate(self):
-        self._setup_fakes()
-
-        expected_resp = {
-            'created_at': None,
-            'domain': '',
-            'id': None,
-            'metadata': None,
-            'name': 'validated_profile',
-            'project': 'profile_test_project',
-            'type': 'TestProfile-1.0',
-            'updated_at': None,
-            'user': 'test_user_id',
-            'spec': {
-                'type': 'TestProfile',
-                'version': '1.0',
-                'properties': {
-                    'INT': 1,
-                    'STR': 'str',
-                    'LIST': ['v1', 'v2'],
-                    'MAP': {'KEY1': 1, 'KEY2': 'v2'},
-                }
-            }
-        }
-
-        body = vorp.ProfileValidateRequestBody(spec=self.spec)
-        request = vorp.ProfileValidateRequest(profile=body)
-
-        resp = self.svc.profile_validate(self.ctx, request.obj_to_primitive())
-
-        self.assertEqual(expected_resp, resp)
-
-    def test_profile_validate_failed(self):
-        self._setup_fakes()
-
-        mock_do_validate = self.patchobject(fakes.TestProfile, 'do_validate')
-        mock_do_validate.side_effect = exc.ESchema(message='BOOM')
-
-        body = vorp.ProfileValidateRequestBody(spec=self.spec)
-        request = vorp.ProfileValidateRequest(profile=body)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_validate,
-                               self.ctx, request.obj_to_primitive())
-        self.assertEqual(exc.InvalidSpec, ex.exc_info[0])
-        self.assertEqual('BOOM',
-                         str(ex.exc_info[1]))
-
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_get(self, mock_find):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_obj.to_dict.return_value = {'foo': 'bar'}
-        req = vorp.ProfileGetRequest(identity='FAKE_PROFILE')
-        project_safe = not self.ctx.is_admin
-
-        result = self.svc.profile_get(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'foo': 'bar'}, result)
-        mock_find.assert_called_once_with(
-            self.ctx, 'FAKE_PROFILE', project_safe=project_safe)
-
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_get_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='profile',
-                                                     id='Bogus')
-        req = vorp.ProfileGetRequest(identity='Bogus')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_get, self.ctx,
-                               req.obj_to_primitive())
-        project_safe = not self.ctx.is_admin
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The profile 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(
-            self.ctx, 'Bogus', project_safe=project_safe)
-
-    @mock.patch.object(pb.Profile, 'load')
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_update(self, mock_find, mock_load):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_profile = mock.Mock()
-        x_profile.name = 'OLD_NAME'
-        x_profile.metadata = {'V': 'K'}
-        x_profile.to_dict.return_value = {'foo': 'bar'}
-        mock_load.return_value = x_profile
-
-        params = {'name': 'NEW_NAME', 'metadata': {'K': 'V'}}
-        req_body = vorp.ProfileUpdateRequestBody(**params)
-        req = vorp.ProfileUpdateRequest(identity='PID', profile=req_body)
-
-        result = self.svc.profile_update(self.ctx, req.obj_to_primitive())
-        self.assertEqual({'foo': 'bar'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'PID')
-        mock_load.assert_called_once_with(self.ctx, profile=x_obj)
-        self.assertEqual('NEW_NAME', x_profile.name)
-        self.assertEqual({'K': 'V'}, x_profile.metadata)
-        x_profile.store.assert_called_once_with(self.ctx)
-
-    @mock.patch.object(pb.Profile, 'load')
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_update_name_none(self, mock_find, mock_load):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_profile = mock.Mock()
-        x_profile.name = 'OLD_NAME'
-        x_profile.to_dict.return_value = {'foo': 'bar'}
-        mock_load.return_value = x_profile
-
-        params = {'name': None, 'metadata': {'K': 'V'}}
-        req_body = vorp.ProfileUpdateRequestBody(**params)
-        req = vorp.ProfileUpdateRequest(identity='PID', profile=req_body)
-
-        result = self.svc.profile_update(self.ctx, req.obj_to_primitive())
-        self.assertEqual({'foo': 'bar'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'PID')
-        mock_load.assert_called_once_with(self.ctx, profile=x_obj)
-        self.assertEqual('OLD_NAME', x_profile.name)
-        self.assertEqual({'K': 'V'}, x_profile.metadata)
-        x_profile.store.assert_called_once_with(self.ctx)
-
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_update_not_found(self, mock_find):
-
-        mock_find.side_effect = exc.ResourceNotFound(type='profile',
-                                                     id='Bogus')
-
-        req_body = vorp.ProfileUpdateRequestBody(name='NEW_NAME')
-        req = vorp.ProfileUpdateRequest(identity='Bogus', profile=req_body)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The profile 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(pb.Profile, 'load')
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_update_no_change(self, mock_find, mock_load):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_profile = mock.Mock()
-        x_profile.name = 'OLD_NAME'
-        x_profile.to_dict.return_value = {'foo': 'bar'}
-        mock_load.return_value = x_profile
-
-        req_body = vorp.ProfileUpdateRequestBody(name='OLD_NAME')
-        req = vorp.ProfileUpdateRequest(identity='PID', profile=req_body)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual('No property needs an update.',
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'PID')
-        mock_load.assert_called_once_with(self.ctx, profile=x_obj)
-        self.assertEqual(0, x_profile.store.call_count)
-        self.assertEqual('OLD_NAME', x_profile.name)
-
-    @mock.patch.object(fakes.TestProfile, 'delete')
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_delete(self, mock_find, mock_delete):
-        self._setup_fakes()
-        x_obj = mock.Mock(id='PROFILE_ID', type='TestProfile-1.0')
-        mock_find.return_value = x_obj
-        mock_delete.return_value = None
-
-        req = vorp.ProfileDeleteRequest(identity='PROFILE_ID')
-        result = self.svc.profile_delete(self.ctx, req.obj_to_primitive())
-
-        self.assertIsNone(result)
-        mock_find.assert_called_once_with(self.ctx, 'PROFILE_ID')
-        mock_delete.assert_called_once_with(self.ctx, 'PROFILE_ID')
-
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_delete_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='profile',
-                                                     id='Bogus')
-
-        req = vorp.ProfileDeleteRequest(identity='Bogus')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_delete, self.ctx,
-                               req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The profile 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(pb.Profile, 'delete')
-    @mock.patch.object(po.Profile, 'find')
-    def test_profile_delete_profile_in_use(self, mock_find, mock_delete):
-        self._setup_fakes()
-        x_obj = mock.Mock(id='PROFILE_ID', type='TestProfile-1.0')
-        mock_find.return_value = x_obj
-        err = exc.EResourceBusy(type='profile', id='PROFILE_ID')
-        mock_delete.side_effect = err
-
-        req = vorp.ProfileDeleteRequest(identity='PROFILE_ID')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.profile_delete,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceInUse, ex.exc_info[0])
-        self.assertEqual("The profile 'PROFILE_ID' cannot be deleted: "
-                         "still referenced by some clusters and/or nodes.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'PROFILE_ID')
-        mock_delete.assert_called_once_with(self.ctx, 'PROFILE_ID')
diff --git a/senlin/tests/unit/conductor/service/test_receivers.py b/senlin/tests/unit/conductor/service/test_receivers.py
deleted file mode 100644
index 32ea6853d..000000000
--- a/senlin/tests/unit/conductor/service/test_receivers.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_messaging.rpc import dispatcher as rpc
-
-from senlin.common import consts
-from senlin.common import exception as exc
-from senlin.conductor import service
-from senlin.engine.receivers import base as rb
-from senlin.objects import cluster as co
-from senlin.objects import receiver as ro
-from senlin.objects.requests import receivers as orro
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-class ReceiverTest(base.SenlinTestCase):
-    def setUp(self):
-        super(ReceiverTest, self).setUp()
-        self.ctx = utils.dummy_context(project='receiver_test_project')
-        self.svc = service.ConductorService('host-a', 'topic-a')
-
-    @mock.patch.object(ro.Receiver, 'get_all')
-    def test_receiver_list(self, mock_get):
-        fake_obj = mock.Mock()
-        fake_obj.to_dict.return_value = {'FOO': 'BAR'}
-        mock_get.return_value = [fake_obj]
-
-        req = orro.ReceiverListRequest()
-        result = self.svc.receiver_list(self.ctx, req.obj_to_primitive())
-
-        self.assertIsInstance(result, list)
-        self.assertEqual([{'FOO': 'BAR'}], result)
-        mock_get.assert_called_once_with(self.ctx, project_safe=True)
-
-    @mock.patch.object(ro.Receiver, 'get_all')
-    def test_receiver_list_with_params(self, mock_get):
-        fake_obj = mock.Mock()
-        fake_obj.to_dict.return_value = {'FOO': 'BAR'}
-        mock_get.return_value = [fake_obj]
-
-        marker = '7445519f-e9db-409f-82f4-187fb8334317'
-        req = orro.ReceiverListRequest(limit=1, marker=marker, sort='name',
-                                       type=['webhook'],
-                                       action=['CLUSTER_RESIZE'],
-                                       cluster_id=['123abc'],
-                                       user=['user123'])
-        result = self.svc.receiver_list(self.ctx, req.obj_to_primitive())
-
-        self.assertIsInstance(result, list)
-        self.assertEqual([{'FOO': 'BAR'}], result)
-        mock_get.assert_called_once_with(self.ctx, limit=1, marker=marker,
-                                         sort='name',
-                                         filters={'type': ['webhook'],
-                                                  'action': ['CLUSTER_RESIZE'],
-                                                  'cluster_id': ['123abc'],
-                                                  'user': ['user123']},
-                                         project_safe=True)
-
-    @mock.patch.object(ro.Receiver, 'get_all')
-    def test_receiver_list_with_project_safe(self, mock_get):
-        mock_get.return_value = []
-
-        req = orro.ReceiverListRequest(project_safe=False)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.receiver_list,
-                               self.ctx, req.obj_to_primitive())
-        self.assertEqual(exc.Forbidden, ex.exc_info[0])
-
-        self.ctx.is_admin = True
-
-        result = self.svc.receiver_list(self.ctx, req.obj_to_primitive())
-        self.assertEqual([], result)
-        mock_get.assert_called_once_with(self.ctx, project_safe=False)
-        mock_get.reset_mock()
-
-        req = orro.ReceiverListRequest(project_safe=True)
-        result = self.svc.receiver_list(self.ctx, req.obj_to_primitive())
-        self.assertEqual([], result)
-        mock_get.assert_called_once_with(self.ctx, project_safe=True)
-        mock_get.reset_mock()
-
-    @mock.patch.object(co.Cluster, 'find')
-    @mock.patch.object(rb.Receiver, 'create')
-    def test_receiver_create_webhook_succeed(self, mock_create, mock_find):
-        fake_cluster = mock.Mock()
-        fake_cluster.user = self.ctx.user_id
-        mock_find.return_value = fake_cluster
-
-        fake_receiver = mock.Mock(id='FAKE_RECEIVER')
-        fake_receiver.to_dict.return_value = {
-            'id': 'FAKE_RECEIVER',
-            'foo': 'bar'
-        }
-        mock_create.return_value = fake_receiver
-        req = orro.ReceiverCreateRequestBody(
-            name='r1', type=consts.RECEIVER_WEBHOOK, cluster_id='C1',
-            action=consts.CLUSTER_RESIZE)
-
-        result = self.svc.receiver_create(self.ctx, req.obj_to_primitive())
-
-        self.assertIsInstance(result, dict)
-        self.assertEqual('FAKE_RECEIVER', result['id'])
-        mock_find.assert_called_once_with(self.ctx, 'C1')
-        mock_create.assert_called_once_with(
-            self.ctx, 'webhook', fake_cluster, consts.CLUSTER_RESIZE,
-            name='r1', user=self.ctx.user_id, project=self.ctx.project_id,
-            domain=self.ctx.domain_id, params={})
-
-        # test params passed
-        mock_create.reset_mock()
-        req = orro.ReceiverCreateRequestBody(
-            name='r1', type=consts.RECEIVER_WEBHOOK, cluster_id='C1',
-            action=consts.CLUSTER_RESIZE, params={'FOO': 'BAR'})
-
-        self.svc.receiver_create(self.ctx, req.obj_to_primitive())
-        mock_create.assert_called_once_with(
-            self.ctx, 'webhook', fake_cluster, consts.CLUSTER_RESIZE,
-            name='r1', user=self.ctx.user_id, project=self.ctx.project_id,
-            domain=self.ctx.domain_id, params={'FOO': 'BAR'})
-
-    @mock.patch.object(ro.Receiver, 'get_by_name')
-    def test_receiver_create_name_duplicated(self, mock_get):
-        cfg.CONF.set_override('name_unique', True)
-        # Return an existing instance
-        mock_get.return_value = mock.Mock()
-        req = orro.ReceiverCreateRequestBody(
-            name='r1', type=consts.RECEIVER_MESSAGE)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.receiver_create,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("A receiver named 'r1' already exists.",
-                         str(ex.exc_info[1]))
-
-    @mock.patch.object(co.Cluster, 'find')
-    def test_receiver_create_webhook_cluster_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='cluster', id='C1')
-        req = orro.ReceiverCreateRequestBody(
-            name='r1', type=consts.RECEIVER_WEBHOOK, cluster_id='C1',
-            action=consts.CLUSTER_RESIZE)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.receiver_create,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("The referenced cluster 'C1' could not be found.",
-                         str(ex.exc_info[1]))
-
-    @mock.patch.object(co.Cluster, 'find')
-    def test_receiver_create_webhook_invalid_action(self, mock_find):
-        fake_cluster = mock.Mock()
-        fake_cluster.user = 'someone'
-        mock_find.return_value = fake_cluster
-        req = orro.ReceiverCreateRequestBody(
-            name='r1', type=consts.RECEIVER_WEBHOOK, cluster_id='C1',
-            action=consts.CLUSTER_CREATE)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.receiver_create,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual("Action name cannot be any of ['CLUSTER_CREATE'].",
-                         str(ex.exc_info[1]))
-
-    @mock.patch.object(co.Cluster, 'find')
-    @mock.patch.object(rb.Receiver, 'create')
-    def test_receiver_create_webhook_forbidden(self, mock_create, mock_find):
-        fake_cluster = mock.Mock()
-        fake_cluster.user = 'someone'
-        mock_find.return_value = fake_cluster
-        req = orro.ReceiverCreateRequestBody(
-            name='r1', type=consts.RECEIVER_WEBHOOK, cluster_id='C1',
-            action=consts.CLUSTER_RESIZE)
-
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.receiver_create,
-                               self.ctx, req.obj_to_primitive())
-        self.assertEqual(exc.Forbidden, ex.exc_info[0])
-
-        fake_receiver = mock.Mock(id='FAKE_RECEIVER')
-        fake_receiver.to_dict.return_value = {
-            'id': 'FAKE_RECEIVER',
-            'foo': 'bar'
-        }
-        mock_create.return_value = fake_receiver
-
-        # an admin can do this
-        self.ctx.is_admin = True
-        result = self.svc.receiver_create(self.ctx, req.obj_to_primitive())
-        self.assertIsInstance(result, dict)
-
-    @mock.patch.object(co.Cluster, 'find')
-    def test_receiver_create_webhook_cluster_not_specified(self, mock_find):
-        fake_cluster = mock.Mock()
-        fake_cluster.user = self.ctx.user_id
-        mock_find.return_value = fake_cluster
-        req1 = orro.ReceiverCreateRequestBody(name='r1', type='webhook',
-                                              action='CLUSTER_RESIZE')
-        req2 = orro.ReceiverCreateRequestBody(name='r1', type='webhook',
-                                              cluster_id=None,
-                                              action='CLUSTER_RESIZE')
-
-        for req in [req1, req2]:
-            ex = self.assertRaises(rpc.ExpectedException,
-                                   self.svc.receiver_create,
-                                   self.ctx, req.obj_to_primitive())
-            self.assertEqual(exc.BadRequest, ex.exc_info[0])
-            self.assertEqual("Cluster identity is required for creating "
-                             "webhook receiver.",
-                             str(ex.exc_info[1]))
-
-    @mock.patch.object(co.Cluster, 'find')
-    def test_receiver_create_webhook_action_not_specified(self, mock_find):
-        fake_cluster = mock.Mock()
-        fake_cluster.user = self.ctx.user_id
-        mock_find.return_value = fake_cluster
-        req1 = orro.ReceiverCreateRequestBody(name='r1', type='webhook',
-                                              cluster_id='C1')
-        req2 = orro.ReceiverCreateRequestBody(name='r1', type='webhook',
-                                              cluster_id='C1', action=None)
-
-        for req in [req1, req2]:
-            ex = self.assertRaises(rpc.ExpectedException,
-                                   self.svc.receiver_create,
-                                   self.ctx, req.obj_to_primitive())
-            self.assertEqual(exc.BadRequest, ex.exc_info[0])
-            self.assertEqual("Action name is required for creating webhook "
-                             "receiver.", str(ex.exc_info[1]))
-
-    @mock.patch.object(rb.Receiver, 'create')
-    def test_receiver_create_message_succeed(self, mock_create):
-        fake_receiver = mock.Mock(id='FAKE_RECEIVER')
-        fake_receiver.to_dict.return_value = {
-            'id': 'FAKE_RECEIVER',
-            'foo': 'bar'
-        }
-        mock_create.return_value = fake_receiver
-
-        req = orro.ReceiverCreateRequestBody(name='r1', type='message')
-        result = self.svc.receiver_create(self.ctx, req.obj_to_primitive())
-
-        self.assertIsInstance(result, dict)
-        self.assertEqual('FAKE_RECEIVER', result['id'])
-        mock_create.assert_called_once_with(
-            self.ctx, 'message', None, None, name='r1', user=self.ctx.user_id,
-            project=self.ctx.project_id, domain=self.ctx.domain_id, params={})
-
-    @mock.patch.object(ro.Receiver, 'find')
-    def test_receiver_get(self, mock_find):
-        fake_obj = mock.Mock()
-        mock_find.return_value = fake_obj
-        fake_obj.to_dict.return_value = {'FOO': 'BAR'}
-
-        req = orro.ReceiverGetRequest(identity='FAKE_ID')
-        res = self.svc.receiver_get(self.ctx, req.obj_to_primitive())
-
-        self.assertEqual({'FOO': 'BAR'}, res)
-        mock_find.assert_called_once_with(self.ctx, 'FAKE_ID')
-
-    @mock.patch.object(ro.Receiver, 'find')
-    def test_receiver_get_not_found(self, mock_find):
-        mock_find.side_effect = exc.ResourceNotFound(type='receiver', id='RR')
-
-        req = orro.ReceiverGetRequest(identity='Bogus')
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.receiver_get, self.ctx,
-                               req.obj_to_primitive())
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-
-    @mock.patch.object(rb.Receiver, 'load')
-    @mock.patch.object(ro.Receiver, 'find')
-    def test_receiver_update_request(self, mock_find, mock_load):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_receiver = mock.Mock()
-        x_receiver.name = 'OLD_NAME'
-        x_receiver.params = {'count': '3'}
-        x_receiver.to_dict.return_value = {'foo': 'bar'}
-        mock_load.return_value = x_receiver
-
-        params = {'name': 'NEW_NAME', 'params': {'count': '3'},
-                  'identity': 'PID'}
-
-        req = orro.ReceiverUpdateRequest(**params)
-
-        result = self.svc.receiver_update(self.ctx, req.obj_to_primitive())
-        self.assertEqual({'foo': 'bar'}, result)
-        mock_find.assert_called_once_with(self.ctx, 'PID')
-        mock_load.assert_called_once_with(self.ctx, receiver_obj=x_obj)
-        self.assertEqual('NEW_NAME', x_receiver.name)
-        self.assertEqual({'count': '3'}, x_receiver.params)
-        x_receiver.store.assert_called_once_with(self.ctx, update=True)
-
-    @mock.patch.object(ro.Receiver, 'find')
-    def test_receiver_update_not_found(self, mock_find):
-
-        mock_find.side_effect = exc.ResourceNotFound(type='receiver',
-                                                     id='Bogus')
-
-        kwargs = {'identity': 'Bogus', 'name': 'NEW_NAME'}
-        req = orro.ReceiverUpdateRequest(**kwargs)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.receiver_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
-        self.assertEqual("The receiver 'Bogus' could not be found.",
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'Bogus')
-
-    @mock.patch.object(rb.Receiver, 'load')
-    @mock.patch.object(ro.Receiver, 'find')
-    def test_receiver_update_no_change(self, mock_find, mock_load):
-        x_obj = mock.Mock()
-        mock_find.return_value = x_obj
-        x_receiver = mock.Mock()
-        x_receiver.name = 'OLD_NAME'
-        x_receiver.to_dict.return_value = {'foo': 'bar'}
-        mock_load.return_value = x_receiver
-
-        kwargs = {'name': 'OLD_NAME', 'identity': 'PID'}
-        req = orro.ReceiverUpdateRequest(**kwargs)
-        ex = self.assertRaises(rpc.ExpectedException,
-                               self.svc.receiver_update,
-                               self.ctx, req.obj_to_primitive())
-
-        self.assertEqual(exc.BadRequest, ex.exc_info[0])
-        self.assertEqual('No property needs an update.',
-                         str(ex.exc_info[1]))
-        mock_find.assert_called_once_with(self.ctx, 'PID')
-        mock_load.assert_called_once_with(self.ctx, receiver_obj=x_obj)
-        self.assertEqual(0, x_receiver.store.call_count)
-        self.assertEqual('OLD_NAME', x_receiver.name)
-
-    @mock.patch.object(ro.Receiver, 'find')
-    @mock.patch.object(rb.Receiver, 'delete')
-    def test_receiver_delete(self, mock_delete, mock_find):
-        fake_obj = mock.Mock()
-        fake_obj.id = 'FAKE_ID'
-        mock_find.return_value = fake_obj
-        req = orro.ReceiverDeleteRequest(identity='FAKE_RECEIVER')
-
result = self.svc.receiver_delete(self.ctx, req.obj_to_primitive()) - - self.assertIsNone(result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_RECEIVER') - mock_delete.assert_called_once_with(self.ctx, 'FAKE_ID') - - @mock.patch.object(ro.Receiver, 'find') - def test_receiver_delete_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='receiver', id='RR') - - req = orro.ReceiverDeleteRequest(identity='Bogus') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.receiver_delete, self.ctx, - req.obj_to_primitive()) - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - - @mock.patch.object(rb.Receiver, 'load') - @mock.patch.object(ro.Receiver, 'find') - def test_receiver_notify(self, mock_find, mock_load): - fake_obj = mock.Mock() - fake_obj.id = 'FAKE_ID' - fake_obj.type = 'message' - fake_obj.user = self.ctx.user_id - fake_receiver = mock.Mock() - mock_find.return_value = fake_obj - mock_load.return_value = fake_receiver - - req = orro.ReceiverNotifyRequest(identity='FAKE_RECEIVER') - result = self.svc.receiver_notify(self.ctx, req.obj_to_primitive()) - - self.assertIsNone(result) - mock_find.assert_called_once_with(self.ctx, 'FAKE_RECEIVER') - mock_load.assert_called_once_with(self.ctx, receiver_obj=fake_obj, - project_safe=True) - fake_receiver.notify.assert_called_once_with(self.ctx) - - @mock.patch.object(ro.Receiver, 'find') - def test_receiver_notify_not_found(self, mock_find): - mock_find.side_effect = exc.ResourceNotFound(type='receiver', id='RR') - - req = orro.ReceiverNotifyRequest(identity='Bogus') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.receiver_notify, self.ctx, - req.obj_to_primitive()) - self.assertEqual(exc.ResourceNotFound, ex.exc_info[0]) - - @mock.patch.object(ro.Receiver, 'find') - def test_receiver_notify_permission_check_fail(self, mock_find): - fake_obj = mock.Mock() - fake_obj.id = 'FAKE_ID' - fake_obj.user = 'foo' - mock_find.return_value = fake_obj - - req = orro.ReceiverNotifyRequest(identity='FAKE_RECEIVER') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.receiver_notify, self.ctx, - req.obj_to_primitive()) - self.assertEqual(exc.Forbidden, ex.exc_info[0]) - - @mock.patch.object(ro.Receiver, 'find') - def test_receiver_notify_incorrect_type(self, mock_find): - fake_obj = mock.Mock() - fake_obj.id = 'FAKE_ID' - fake_obj.user = self.ctx.user_id - fake_obj.type = 'not_message' - mock_find.return_value = fake_obj - - req = orro.ReceiverNotifyRequest(identity='FAKE_RECEIVER') - ex = self.assertRaises(rpc.ExpectedException, - self.svc.receiver_notify, self.ctx, - req.obj_to_primitive()) - self.assertEqual(exc.BadRequest, ex.exc_info[0]) diff --git a/senlin/tests/unit/conductor/service/test_webhooks.py b/senlin/tests/unit/conductor/service/test_webhooks.py deleted file mode 100644 index 02b0a255d..000000000 --- a/senlin/tests/unit/conductor/service/test_webhooks.py +++ /dev/null @@ -1,233 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
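# The webhook tests below stack several mock.patch.object decorators on one
# test method. Stacked patch decorators are applied bottom-up, so the
# decorator closest to the method supplies the first mock argument. A minimal
# stdlib-only sketch of that ordering; the _Receiver/_Cluster classes here
# are hypothetical stand-ins, not Senlin's objects:

from unittest import mock


class _Receiver(object):
    @staticmethod
    def find(ctx, identity):
        raise RuntimeError('should be patched out')


class _Cluster(object):
    @staticmethod
    def find(ctx, identity):
        raise RuntimeError('should be patched out')


@mock.patch.object(_Cluster, 'find')
@mock.patch.object(_Receiver, 'find')
def _demo(mock_receiver_find, mock_cluster_find):
    # bottom decorator -> first parameter, top decorator -> last parameter
    _Receiver.find('ctx', 'R1')
    _Cluster.find('ctx', 'C1')
    mock_receiver_find.assert_called_once_with('ctx', 'R1')
    mock_cluster_find.assert_called_once_with('ctx', 'C1')


_demo()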
- -from unittest import mock - -from oslo_messaging.rpc import dispatcher as rpc - -from senlin.common import consts -from senlin.common import exception -from senlin.conductor import service -from senlin.engine.actions import base as action_mod -from senlin.engine import dispatcher -from senlin.objects import cluster as co -from senlin.objects import receiver as ro -from senlin.objects.requests import webhooks as vorw -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class WebhookTest(base.SenlinTestCase): - def setUp(self): - super(WebhookTest, self).setUp() - self.ctx = utils.dummy_context(project='webhook_test_project') - self.svc = service.ConductorService('host-a', 'topic-a') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(ro.Receiver, 'find') - def test_webhook_trigger_params_in_body_with_params( - self, mock_get, mock_find, mock_action, notify): - mock_find.return_value = mock.Mock(id='FAKE_CLUSTER') - mock_get.return_value = mock.Mock(id='01234567-abcd-efef', - cluster_id='FAKE_CLUSTER', - action='DANCE', - params={'foo': 'bar'}) - mock_action.return_value = 'ACTION_ID' - - body = {'kee': 'vee'} - req = vorw.WebhookTriggerRequestParamsInBody(identity='FAKE_RECEIVER', - body=body) - res = self.svc.webhook_trigger(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - - mock_get.assert_called_once_with(self.ctx, 'FAKE_RECEIVER') - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_action.assert_called_once_with( - self.ctx, 'FAKE_CLUSTER', 'DANCE', - name='webhook_01234567', - cluster_id='FAKE_CLUSTER', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY, - inputs={'kee': 'vee', 'foo': 'bar'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(ro.Receiver, 'find') - def test_webhook_trigger_params_in_body_no_params( - self, mock_get, mock_find, mock_action, notify): - mock_find.return_value = mock.Mock(id='FAKE_CLUSTER') - mock_get.return_value = mock.Mock(id='01234567-abcd-efef', - cluster_id='FAKE_CLUSTER', - action='DANCE', - params={'foo': 'bar'}) - mock_action.return_value = 'ACTION_ID' - - body = {} - req = vorw.WebhookTriggerRequestParamsInBody(identity='FAKE_RECEIVER', - body=body) - res = self.svc.webhook_trigger(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - - mock_get.assert_called_once_with(self.ctx, 'FAKE_RECEIVER') - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_action.assert_called_once_with( - self.ctx, 'FAKE_CLUSTER', 'DANCE', - name='webhook_01234567', - cluster_id='FAKE_CLUSTER', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY, - inputs={'foo': 'bar'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(ro.Receiver, 'find') - def test_webhook_trigger_params_in_body_receiver_not_found( - self, mock_find): - mock_find.side_effect = exception.ResourceNotFound(type='receiver', - id='RRR') - body = None - req = vorw.WebhookTriggerRequestParamsInBody(identity='RRR', body=body) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.webhook_trigger, self.ctx, - req.obj_to_primitive()) - - self.assertEqual(exception.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The receiver 'RRR' could not be found.", - str(ex.exc_info[1])) 
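# The exc_info assertions in these tests unwrap oslo_messaging's
# ExpectedException, which captures the exception being handled as a
# sys.exc_info() triple. A self-contained sketch of that wrap/unwrap shape;
# ExpectedException and ResourceNotFound are re-implemented locally for the
# sketch, not imported from oslo_messaging or senlin:

import sys


class ExpectedException(Exception):
    def __init__(self):
        super(ExpectedException, self).__init__()
        # (type, value, traceback) of the exception currently being handled
        self.exc_info = sys.exc_info()


class ResourceNotFound(Exception):
    pass


def rpc_endpoint():
    try:
        raise ResourceNotFound("The receiver 'RRR' could not be found.")
    except ResourceNotFound:
        raise ExpectedException()


try:
    rpc_endpoint()
except ExpectedException as ex:
    assert ex.exc_info[0] is ResourceNotFound
    assert str(ex.exc_info[1]) == "The receiver 'RRR' could not be found."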
- mock_find.assert_called_once_with(self.ctx, 'RRR') - - @mock.patch.object(ro.Receiver, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_webhook_trigger_params_in_body_cluster_not_found( - self, mock_cluster, mock_find): - receiver = mock.Mock() - receiver.cluster_id = 'BOGUS' - mock_find.return_value = receiver - mock_cluster.side_effect = exception.ResourceNotFound(type='cluster', - id='BOGUS') - body = None - req = vorw.WebhookTriggerRequestParamsInBody(identity='RRR', body=body) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.webhook_trigger, self.ctx, - req.obj_to_primitive()) - - self.assertEqual(exception.BadRequest, ex.exc_info[0]) - self.assertEqual("The referenced cluster 'BOGUS' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'RRR') - mock_cluster.assert_called_once_with(self.ctx, 'BOGUS') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(ro.Receiver, 'find') - def test_webhook_trigger_with_params(self, mock_get, mock_find, - mock_action, notify): - mock_find.return_value = mock.Mock(id='FAKE_CLUSTER') - mock_get.return_value = mock.Mock(id='01234567-abcd-efef', - cluster_id='FAKE_CLUSTER', - action='DANCE', - params={'foo': 'bar'}) - mock_action.return_value = 'ACTION_ID' - - body = vorw.WebhookTriggerRequestBody(params={'kee': 'vee'}) - req = vorw.WebhookTriggerRequest(identity='FAKE_RECEIVER', - body=body) - res = self.svc.webhook_trigger(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - - mock_get.assert_called_once_with(self.ctx, 'FAKE_RECEIVER') - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_action.assert_called_once_with( - self.ctx, 'FAKE_CLUSTER', 'DANCE', - name='webhook_01234567', - cluster_id='FAKE_CLUSTER', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY, - inputs={'kee': 'vee', 'foo': 'bar'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(co.Cluster, 'find') - @mock.patch.object(ro.Receiver, 'find') - def test_webhook_trigger_no_params(self, mock_get, mock_find, - mock_action, notify): - mock_find.return_value = mock.Mock(id='FAKE_CLUSTER') - mock_get.return_value = mock.Mock(id='01234567-abcd-efef', - cluster_id='FAKE_CLUSTER', - action='DANCE', - params={'foo': 'bar'}) - mock_action.return_value = 'ACTION_ID' - - body = vorw.WebhookTriggerRequestBody(params={}) - req = vorw.WebhookTriggerRequest(identity='FAKE_RECEIVER', - body=body) - res = self.svc.webhook_trigger(self.ctx, req.obj_to_primitive()) - - self.assertEqual({'action': 'ACTION_ID'}, res) - - mock_get.assert_called_once_with(self.ctx, 'FAKE_RECEIVER') - mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - mock_action.assert_called_once_with( - self.ctx, 'FAKE_CLUSTER', 'DANCE', - name='webhook_01234567', - cluster_id='FAKE_CLUSTER', - cause=consts.CAUSE_RPC, - status=action_mod.Action.READY, - inputs={'foo': 'bar'}, - ) - notify.assert_called_once_with() - - @mock.patch.object(ro.Receiver, 'find') - def test_webhook_trigger_receiver_not_found(self, mock_find): - mock_find.side_effect = exception.ResourceNotFound(type='receiver', - id='RRR') - body = vorw.WebhookTriggerRequestBody(params=None) - req = vorw.WebhookTriggerRequest(identity='RRR', body=body) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.webhook_trigger, self.ctx, 
- req.obj_to_primitive()) - - self.assertEqual(exception.ResourceNotFound, ex.exc_info[0]) - self.assertEqual("The receiver 'RRR' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'RRR') - - @mock.patch.object(ro.Receiver, 'find') - @mock.patch.object(co.Cluster, 'find') - def test_webhook_trigger_cluster_not_found(self, mock_cluster, mock_find): - receiver = mock.Mock() - receiver.cluster_id = 'BOGUS' - mock_find.return_value = receiver - mock_cluster.side_effect = exception.ResourceNotFound(type='cluster', - id='BOGUS') - body = vorw.WebhookTriggerRequestBody(params=None) - req = vorw.WebhookTriggerRequest(identity='RRR', body=body) - ex = self.assertRaises(rpc.ExpectedException, - self.svc.webhook_trigger, self.ctx, - req.obj_to_primitive()) - - self.assertEqual(exception.BadRequest, ex.exc_info[0]) - self.assertEqual("The referenced cluster 'BOGUS' could not be found.", - str(ex.exc_info[1])) - mock_find.assert_called_once_with(self.ctx, 'RRR') - mock_cluster.assert_called_once_with(self.ctx, 'BOGUS') diff --git a/senlin/tests/unit/conductor/test_service.py b/senlin/tests/unit/conductor/test_service.py deleted file mode 100644 index d1397c56c..000000000 --- a/senlin/tests/unit/conductor/test_service.py +++ /dev/null @@ -1,186 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
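# The conductor tests below start the service with a short periodic_interval,
# sleep for a few intervals, and assert the mocked housekeeping call ran more
# than once even when it raises. A stdlib-only sketch of that resilience
# property, assuming a plain thread in place of oslo_service/eventlet;
# run_periodically is illustrative, not Senlin's API:

import threading
import time
from unittest import mock


def run_periodically(fn, interval, stop_event):
    # Keep calling fn until stopped; swallow errors so one failed report
    # cannot kill the reporting loop.
    while not stop_event.is_set():
        try:
            fn()
        except Exception:
            pass
        time.sleep(interval)


report = mock.Mock(side_effect=Exception('blah'))
stop = threading.Event()
worker = threading.Thread(target=run_periodically, args=(report, 0.05, stop))
worker.start()
time.sleep(0.3)
stop.set()
worker.join()
assert report.call_count > 1  # kept reporting despite the exceptions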
- -from unittest import mock - -import eventlet -from oslo_config import cfg -import oslo_messaging -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.conductor import service -from senlin.objects.requests import build_info as vorb -from senlin.objects import service as service_obj -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class ConductorTest(base.SenlinTestCase): - def setUp(self): - super(ConductorTest, self).setUp() - self.context = utils.dummy_context() - - self.service_id = '4db0a14c-dc10-4131-8ed6-7573987ce9b0' - self.tg = mock.Mock() - self.topic = consts.HEALTH_MANAGER_TOPIC - - self.svc = service.ConductorService('HOST', self.topic) - self.svc.service_id = self.service_id - self.svc.tg = self.tg - - @mock.patch('oslo_service.service.Service.__init__') - def test_service_thread_numbers(self, mock_service_init): - service.ConductorService('HOST', self.topic) - - mock_service_init.assert_called_once_with(1000) - - @mock.patch('oslo_service.service.Service.__init__') - def test_service_thread_numbers_override(self, mock_service_init): - cfg.CONF.set_override('threads', 100, group='conductor') - - service.ConductorService('HOST', self.topic) - - mock_service_init.assert_called_once_with(100) - - def test_init(self): - self.assertEqual(self.service_id, self.svc.service_id) - self.assertEqual(self.tg, self.svc.tg) - self.assertEqual(self.topic, self.svc.topic) - - @mock.patch.object(uuidutils, 'generate_uuid') - @mock.patch.object(oslo_messaging, 'get_rpc_server') - @mock.patch.object(service_obj.Service, 'create') - def test_service_start(self, mock_service_create, mock_rpc_server, - mock_uuid): - service_uuid = '4db0a14c-dc10-4131-8ed6-7573987ce9b1' - mock_uuid.return_value = service_uuid - - self.svc.start() - - mock_uuid.assert_called_once() - mock_service_create.assert_called_once() - self.svc.server.start.assert_called_once() - - self.assertEqual(service_uuid, self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'delete') - def test_service_stop(self, mock_delete): - self.svc.server = mock.Mock() - - self.svc.stop() - - self.svc.server.stop.assert_called_once() - self.svc.server.wait.assert_called_once() - - mock_delete.assert_called_once_with(self.service_id) - - @mock.patch.object(service_obj.Service, 'delete') - def test_service_stop_not_yet_started(self, mock_delete): - self.svc.server = None - - self.svc.stop() - - mock_delete.assert_called_once_with(self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'update') - def test_service_manage_report_update(self, mock_update): - mock_update.return_value = mock.Mock() - self.svc.service_manage_report() - mock_update.assert_called_once_with(mock.ANY, - self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'update') - def test_service_manage_report_with_exception(self, mock_update): - mock_update.side_effect = Exception('blah') - self.svc.service_manage_report() - self.assertEqual(mock_update.call_count, 1) - - def test_get_revision(self): - self.assertEqual( - cfg.CONF.revision['senlin_engine_revision'], - self.svc.get_revision( - self.context, vorb.GetRevisionRequest().obj_to_primitive() - ) - ) - - -class ConductorCleanupTest(base.SenlinTestCase): - def setUp(self): - super(ConductorCleanupTest, self).setUp() - - self.service_id = '4db0a14c-dc10-4131-8ed6-7573987ce9b0' - self.topic = consts.HEALTH_MANAGER_TOPIC - - @mock.patch.object(service_obj.Service, 'update') - def test_conductor_manage_report(self, mock_update): 
- cfg.CONF.set_override('periodic_interval', 0.1) - - self.svc = service.ConductorService('HOST', self.topic) - self.svc.service_id = self.service_id - - # start engine and verify that update is being called more than once - self.svc.start() - eventlet.sleep(0.6) - self.assertGreater(mock_update.call_count, 1) - self.svc.stop() - - @mock.patch.object(service_obj.Service, 'update') - def test_conductor_manage_report_with_exception(self, mock_update): - cfg.CONF.set_override('periodic_interval', 0.1) - - self.svc = service.ConductorService('HOST', self.topic) - self.svc.service_id = self.service_id - - # start engine and verify that update is being called more than once - # even with the exception being thrown - mock_update.side_effect = Exception('blah') - self.svc.start() - eventlet.sleep(0.6) - self.assertGreater(mock_update.call_count, 1) - self.svc.stop() - - @mock.patch.object(service_obj.Service, 'cleanup_all_expired') - def test_service_manage_cleanup(self, mock_cleanup): - self.svc = service.ConductorService('HOST', self.topic) - self.svc.service_id = self.service_id - self.svc.service_manage_cleanup() - mock_cleanup.assert_called_once_with('senlin-conductor') - - @mock.patch.object(service_obj.Service, 'cleanup_all_expired') - def test_service_manage_cleanup_without_exception(self, mock_cleanup): - cfg.CONF.set_override('periodic_interval', 0.1) - - self.svc = service.ConductorService('HOST', self.topic) - self.svc.service_id = self.service_id - - # start engine and verify that cleanup_all_expired is being called - # more than once - self.svc.start() - eventlet.sleep(0.6) - self.svc.stop() - mock_cleanup.assert_called() - - @mock.patch.object(service_obj.Service, 'cleanup_all_expired') - def test_service_manage_cleanup_with_exception(self, mock_cleanup): - cfg.CONF.set_override('periodic_interval', 0.1) - - self.svc = service.ConductorService('HOST', self.topic) - self.svc.service_id = self.service_id - - # start engine and verify that cleanup_all_expired is being called - # more than once even with the exception being thrown - mock_cleanup.side_effect = Exception('blah') - self.svc.start() - eventlet.sleep(0.6) - self.svc.stop() - mock_cleanup.assert_called() diff --git a/senlin/tests/unit/db/__init__.py b/senlin/tests/unit/db/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/db/shared.py b/senlin/tests/unit/db/shared.py deleted file mode 100644 index 0a7328d9c..000000000 --- a/senlin/tests/unit/db/shared.py +++ /dev/null @@ -1,193 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
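# Every factory in the shared module below follows the same pattern: build a
# dict of default column values, then let values.update(kwargs) apply caller
# overrides before calling the DB API. A tiny generic sketch of that pattern;
# make_record and its fields are illustrative, not Senlin's schema:

def make_record(**kwargs):
    values = {
        'name': 'default_name',
        'status': 'INIT',
        'project': 'default_project',
    }
    values.update(kwargs)  # caller-supplied overrides win over the defaults
    return values


assert make_record()['status'] == 'INIT'
assert make_record(status='ACTIVE', project='p1')['project'] == 'p1'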
- - -from oslo_serialization import jsonutils -from oslo_utils import timeutils as tu -from oslo_utils import uuidutils - -from senlin.db.sqlalchemy import api as db_api -from senlin.engine import parser - - -sample_profile = """ - name: test_profile_name - type: my_test_profile_type - spec: - template: - heat_template_version: "2013-05-23" - resources: - myrandom: OS::Heat::RandomString - files: - myfile: contents -""" - -sample_action = """ - name: test_cluster_create_action - target: cluster_001 - action: create - cause: User Initiate - cluster_id: cluster_001_id - timeout: 60 - control: READY - status: INIT - status_reason: Just Initialized - inputs: - min_size: 1 - max_size: 10 - pause_time: PT10M -""" - - -UUIDs = (UUID1, UUID2, UUID3) = sorted([uuidutils.generate_uuid() - for x in range(3)]) - - -def create_profile(context, profile=sample_profile, **kwargs): - data = parser.simple_parse(profile) - data['user'] = context.user_id - data['project'] = context.project_id - data['domain'] = context.domain_id - data.update(kwargs) - return db_api.profile_create(context, data) - - -def create_cluster(ctx, profile, **kwargs): - values = { - 'name': 'db_test_cluster_name', - 'profile_id': profile.id, - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': 'unknown', - 'parent': None, - 'next_index': 1, - 'timeout': 60, - 'desired_capacity': 0, - 'init_at': tu.utcnow(True), - 'status': 'INIT', - 'status_reason': 'Just Initialized', - 'meta_data': {}, - 'dependents': {}, - 'config': {}, - } - values.update(kwargs) - if 'project' in kwargs: - values.update({'project': kwargs.get('project')}) - return db_api.cluster_create(ctx, values) - - -def create_node(ctx, cluster, profile, **kwargs): - if cluster: - cluster_id = cluster.id - index = db_api.cluster_next_index(ctx, cluster_id) - else: - cluster_id = '' - index = -1 - - values = { - 'name': 'test_node_name', - 'physical_id': UUID1, - 'cluster_id': cluster_id, - 'profile_id': profile.id, - 'project': ctx.project_id, - 'index': index, - 'role': None, - 'created_at': None, - 'updated_at': None, - 'status': 'ACTIVE', - 'status_reason': 'create complete', - 'meta_data': jsonutils.loads('{"foo": "123"}'), - 'data': jsonutils.loads('{"key1": "value1"}'), - 'dependents': {}, - 'tainted': False, - } - values.update(kwargs) - return db_api.node_create(ctx, values) - - -def create_webhook(ctx, obj_id, obj_type, action, **kwargs): - values = { - 'name': 'test_webhook_name', - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - 'created_at': None, - 'obj_id': obj_id, - 'obj_type': obj_type, - 'action': action, - 'credential': None, - 'params': None, - } - values.update(kwargs) - return db_api.webhook_create(ctx, values) - - -def create_action(ctx, **kwargs): - values = { - 'context': kwargs.get('context'), - 'description': 'Action description', - 'target': kwargs.get('target'), - 'action': kwargs.get('action'), - 'cause': 'Reason for action', - 'owner': kwargs.get('owner'), - 'interval': -1, - 'inputs': {'key': 'value'}, - 'outputs': {'result': 'value'}, - 'depends_on': [], - 'depended_by': [] - } - values.update(kwargs) - return db_api.action_create(ctx, values) - - -def create_policy(ctx, **kwargs): - values = { - 'name': 'test_policy', - 'type': 'senlin.policy.scaling', - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - 'spec': { - 'type': 'senlin.policy.scaling', - 'version': '1.0', - 'properties': { - 'adjustment_type': 'WHATEVER', - 'count': 1, - } - }, - 'data': None, - } - - 
values.update(kwargs) - return db_api.policy_create(ctx, values) - - -def create_event(ctx, **kwargs): - values = { - 'timestamp': tu.utcnow(True), - 'obj_id': 'FAKE_ID', - 'obj_name': 'FAKE_NAME', - 'obj_type': 'CLUSTER', - 'cluster_id': 'FAKE_CLUSTER', - 'level': '20', - 'user': ctx.user_id, - 'project': ctx.project_id, - 'action': 'DANCE', - 'status': 'READY', - 'status_reason': 'Just created.', - 'meta_data': { - 'air': 'polluted' - } - } - - values.update(kwargs) - return db_api.event_create(ctx, values) diff --git a/senlin/tests/unit/db/test_action_api.py b/senlin/tests/unit/db/test_action_api.py deleted file mode 100644 index 577cec563..000000000 --- a/senlin/tests/unit/db/test_action_api.py +++ /dev/null @@ -1,807 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import time - -from oslo_utils import timeutils as tu -from senlin.common import consts -from senlin.common import exception -from senlin.db.sqlalchemy import api as db_api -from senlin.engine import parser -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - - -def _create_action(context, action_json=shared.sample_action, **kwargs): - data = parser.simple_parse(action_json) - data['user'] = context.user_id - data['project'] = context.project_id - data['domain'] = context.domain_id - data.update(kwargs) - return db_api.action_create(context, data) - - -class DBAPIActionTest(base.SenlinTestCase): - def setUp(self): - super(DBAPIActionTest, self).setUp() - self.ctx = utils.dummy_context() - - def test_action_create(self): - data = parser.simple_parse(shared.sample_action) - action = _create_action(self.ctx) - - self.assertIsNotNone(action) - self.assertEqual(data['name'], action.name) - self.assertEqual(data['cluster_id'], action.cluster_id) - self.assertEqual(data['target'], action.target) - self.assertEqual(data['action'], action.action) - self.assertEqual(data['cause'], action.cause) - self.assertEqual(data['timeout'], action.timeout) - self.assertEqual(data['status'], action.status) - self.assertEqual(data['status_reason'], action.status_reason) - self.assertEqual(10, action.inputs['max_size']) - self.assertEqual(self.ctx.user_id, action.user) - self.assertEqual(self.ctx.project_id, action.project) - self.assertEqual(self.ctx.domain_id, action.domain) - self.assertIsNone(action.outputs) - - def test_action_update(self): - action = _create_action(self.ctx) - values = { - 'status': 'ERROR', - 'status_reason': 'Cluster creation failed', - 'data': {'key1': 'value1', 'key2': 'value2'} - } - db_api.action_update(self.ctx, action.id, values) - action = db_api.action_get(self.ctx, action.id) - self.assertEqual('ERROR', action.status) - self.assertEqual('Cluster creation failed', action.status_reason) - self.assertEqual({'key1': 'value1', 'key2': 'value2'}, action.data) - - self.assertRaises(exception.ResourceNotFound, - db_api.action_update, self.ctx, 'fake-uuid', values) - - def test_action_get(self): - data = 
parser.simple_parse(shared.sample_action) - action = _create_action(self.ctx) - retobj = db_api.action_get(self.ctx, action.id) - - self.assertIsNotNone(retobj) - self.assertEqual(data['name'], retobj.name) - self.assertEqual(data['target'], retobj.target) - self.assertEqual(data['action'], retobj.action) - self.assertEqual(data['cause'], retobj.cause) - self.assertEqual(data['timeout'], retobj.timeout) - self.assertEqual(data['status'], retobj.status) - self.assertEqual(data['status_reason'], retobj.status_reason) - self.assertEqual(10, retobj.inputs['max_size']) - self.assertIsNone(retobj.outputs) - - def test_action_get_with_invalid_id(self): - retobj = db_api.action_get(self.ctx, 'fake-uuid') - self.assertIsNone(retobj) - - def test_action_get_by_name(self): - data = parser.simple_parse(shared.sample_action) - _create_action(self.ctx) - retobj = db_api.action_get_by_name(self.ctx, data['name']) - - self.assertIsNotNone(retobj) - self.assertEqual(data['name'], retobj.name) - self.assertEqual(data['target'], retobj.target) - self.assertEqual(data['action'], retobj.action) - self.assertEqual(data['cause'], retobj.cause) - self.assertEqual(data['timeout'], retobj.timeout) - self.assertEqual(data['status'], retobj.status) - self.assertEqual(data['status_reason'], retobj.status_reason) - self.assertEqual(10, retobj.inputs['max_size']) - self.assertIsNone(retobj.outputs) - - def test_action_get_by_name_duplicated(self): - data = parser.simple_parse(shared.sample_action) - action = _create_action(self.ctx) - another_action = _create_action(self.ctx) - - self.assertIsNotNone(action) - self.assertIsNotNone(another_action) - self.assertNotEqual(action.id, another_action.id) - self.assertRaises(exception.MultipleChoices, - db_api.action_get_by_name, - self.ctx, data['name']) - - def test_action_get_by_name_invalid(self): - retobj = db_api.action_get_by_name(self.ctx, 'fake-name') - self.assertIsNone(retobj) - - def test_action_get_by_short_id(self): - spec1 = {'id': 'same-part-unique-part'} - spec2 = {'id': 'same-part-part-unique'} - action1 = _create_action(self.ctx, **spec1) - action2 = _create_action(self.ctx, **spec2) - - ret_action1 = db_api.action_get_by_short_id(self.ctx, spec1['id'][:11]) - self.assertEqual(ret_action1.id, action1.id) - ret_action2 = db_api.action_get_by_short_id(self.ctx, spec2['id'][:11]) - self.assertEqual(ret_action2.id, action2.id) - - self.assertRaises(exception.MultipleChoices, - db_api.action_get_by_short_id, - self.ctx, 'same-part-') - - def test_action_get_project_safe(self): - parser.simple_parse(shared.sample_action) - action = _create_action(self.ctx) - new_ctx = utils.dummy_context(project='another-project') - retobj = db_api.action_get(new_ctx, action.id, project_safe=True) - self.assertIsNone(retobj) - retobj = db_api.action_get(new_ctx, action.id, project_safe=False) - self.assertIsNotNone(retobj) - - def test_action_get_with_admin_context(self): - parser.simple_parse(shared.sample_action) - action = _create_action(self.ctx) - new_ctx = utils.dummy_context(project='another-project', is_admin=True) - - retobj = db_api.action_get(new_ctx, action.id, project_safe=True) - self.assertIsNotNone(retobj) - - retobj = db_api.action_get(new_ctx, action.id, project_safe=False) - self.assertIsNotNone(retobj) - - def test_acquire_first_ready_none(self): - data = {'created_at': tu.utcnow(True)} - - _create_action(self.ctx, **data) - result = db_api.action_acquire_first_ready(self.ctx, 'fake_o', - tu.utcnow(True)) - self.assertIsNone(result) - - def 
test_acquire_first_ready_one(self): - data = {'created_at': tu.utcnow(True), 'id': 'fake_UUID'} - _create_action(self.ctx, **data) - - result = db_api.action_acquire_first_ready(self.ctx, 'fake_o', - tu.utcnow(True)) - self.assertIsNone(result) - - def test_acquire_first_ready_mult(self): - data = { - 'created_at': tu.utcnow(True), - 'status': 'READY', - } - action1 = _create_action(self.ctx, **data) - time.sleep(1) - - data['created_at'] = tu.utcnow(True) - _create_action(self.ctx, **data) - - result = db_api.action_acquire_first_ready(self.ctx, 'fake_o', - time.time()) - self.assertEqual(action1.id, result.id) - - def test_action_acquire_random_ready(self): - specs = [ - {'name': 'A01', 'status': 'INIT'}, - {'name': 'A02', 'status': 'READY', 'owner': 'worker1'}, - {'name': 'A03', 'status': 'INIT'}, - {'name': 'A04', 'status': 'READY'} - ] - - for spec in specs: - _create_action(self.ctx, **spec) - - worker = 'worker2' - timestamp = time.time() - action = db_api.action_acquire_random_ready(self.ctx, worker, - timestamp) - - self.assertIn(action.name, ('A02', 'A04')) - self.assertEqual('worker2', action.owner) - self.assertEqual(consts.ACTION_RUNNING, action.status) - self.assertEqual(timestamp, float(action.start_time)) - - def test_action_get_all_by_owner(self): - specs = [ - {'name': 'A01', 'owner': 'work1'}, - {'name': 'A02', 'owner': 'work2'}, - {'name': 'A03', 'owner': 'work1'}, - {'name': 'A04', 'owner': 'work3'} - ] - - for spec in specs: - _create_action(self.ctx, **spec) - - actions = db_api.action_get_all_by_owner(self.ctx, 'work1') - self.assertEqual(2, len(actions)) - names = [p.name for p in actions] - for spec in ['A01', 'A03']: - self.assertIn(spec, names) - - action_fake_owner = db_api.action_get_all_by_owner(self.ctx, - 'fake-owner') - self.assertEqual(0, len(action_fake_owner)) - - def test_action_get_all(self): - specs = [ - {'name': 'A01', 'target': 'cluster_001'}, - {'name': 'A02', 'target': 'node_001'}, - ] - - for spec in specs: - _create_action(self.ctx, **spec) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(2, len(actions)) - names = [p.name for p in actions] - for spec in specs: - self.assertIn(spec['name'], names) - - def test_action_get_all_active_by_target(self): - specs = [ - {'name': 'A01', 'target': 'cluster_001', 'status': 'READY'}, - {'name': 'A02', 'target': 'node_001'}, - {'name': 'A03', 'target': 'cluster_001', 'status': 'INIT'}, - {'name': 'A04', 'target': 'cluster_001', 'status': 'WAITING'}, - {'name': 'A05', 'target': 'cluster_001', 'status': 'READY'}, - {'name': 'A06', 'target': 'cluster_001', 'status': 'RUNNING'}, - {'name': 'A07', 'target': 'cluster_001', 'status': 'SUCCEEDED'}, - {'name': 'A08', 'target': 'cluster_001', 'status': 'FAILED'}, - {'name': 'A09', 'target': 'cluster_001', 'status': 'CANCELLED'}, - {'name': 'A10', 'target': 'cluster_001', - 'status': 'WAITING_LIFECYCLE_COMPLETION'}, - {'name': 'A11', 'target': 'cluster_001', 'status': 'SUSPENDED'}, - ] - - for spec in specs: - _create_action(self.ctx, **spec) - - actions = db_api.action_get_all_active_by_target(self.ctx, - 'cluster_001') - self.assertEqual(5, len(actions)) - names = [p.name for p in actions] - for name in names: - self.assertIn(name, ['A01', 'A04', 'A05', 'A06', 'A10']) - - def test_action_get_all_project_safe(self): - parser.simple_parse(shared.sample_action) - _create_action(self.ctx) - new_ctx = utils.dummy_context(project='another-project') - actions = db_api.action_get_all(new_ctx, project_safe=True) - self.assertEqual(0, len(actions)) - actions 
= db_api.action_get_all(new_ctx, project_safe=False) - self.assertEqual(1, len(actions)) - - def test_action_check_status(self): - specs = [ - {'name': 'A01', 'target': 'cluster_001'}, - {'name': 'A02', 'target': 'node_001'}, - ] - - id_of = {} - for spec in specs: - action = _create_action(self.ctx, **spec) - id_of[spec['name']] = action.id - - db_api.dependency_add(self.ctx, id_of['A02'], id_of['A01']) - action1 = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_WAITING, action1.status) - - timestamp = time.time() - status = db_api.action_check_status(self.ctx, id_of['A01'], timestamp) - self.assertEqual(consts.ACTION_WAITING, status) - - status = db_api.action_check_status(self.ctx, id_of['A01'], timestamp) - self.assertEqual(consts.ACTION_WAITING, status) - timestamp = time.time() - db_api.action_mark_succeeded(self.ctx, id_of['A02'], timestamp) - - status = db_api.action_check_status(self.ctx, id_of['A01'], timestamp) - self.assertEqual(consts.ACTION_READY, status) - - action1 = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual('All depended actions completed.', - action1.status_reason) - self.assertEqual(round(timestamp, 6), float(action1.end_time)) - - def _check_dependency_add_dependent_list(self): - specs = [ - {'name': 'A01', 'target': 'cluster_001'}, - {'name': 'A02', 'target': 'node_001'}, - {'name': 'A03', 'target': 'node_002'}, - {'name': 'A04', 'target': 'node_003'}, - ] - - id_of = {} - for spec in specs: - action = _create_action(self.ctx, **spec) - id_of[spec['name']] = action.id - - db_api.dependency_add(self.ctx, - id_of['A01'], - [id_of['A02'], id_of['A03'], id_of['A04']]) - - res = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(3, len(res)) - self.assertIn(id_of['A02'], res) - self.assertIn(id_of['A03'], res) - self.assertIn(id_of['A04'], res) - res = db_api.dependency_get_depended(self.ctx, id_of['A01']) - self.assertEqual(0, len(res)) - - for aid in [id_of['A02'], id_of['A03'], id_of['A04']]: - res = db_api.dependency_get_depended(self.ctx, aid) - self.assertEqual(1, len(res)) - self.assertIn(id_of['A01'], res) - res = db_api.dependency_get_dependents(self.ctx, aid) - self.assertEqual(0, len(res)) - action = db_api.action_get(self.ctx, aid) - self.assertEqual(action.status, consts.ACTION_WAITING) - - return id_of - - def _check_dependency_add_depended_list(self): - specs = [ - {'name': 'A01', 'target': 'cluster_001'}, - {'name': 'A02', 'target': 'node_001'}, - {'name': 'A03', 'target': 'node_002'}, - {'name': 'A04', 'target': 'node_003'}, - ] - - id_of = {} - for spec in specs: - action = _create_action(self.ctx, **spec) - id_of[spec['name']] = action.id - - db_api.dependency_add(self.ctx, - [id_of['A02'], id_of['A03'], id_of['A04']], - id_of['A01']) - - res = db_api.dependency_get_depended(self.ctx, id_of['A01']) - self.assertEqual(3, len(res)) - self.assertIn(id_of['A02'], res) - self.assertIn(id_of['A03'], res) - self.assertIn(id_of['A04'], res) - - res = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(0, len(res)) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(action.status, consts.ACTION_WAITING) - - for aid in [id_of['A02'], id_of['A03'], id_of['A04']]: - res = db_api.dependency_get_dependents(self.ctx, aid) - self.assertEqual(1, len(res)) - self.assertIn(id_of['A01'], res) - res = db_api.dependency_get_depended(self.ctx, aid) - self.assertEqual(0, len(res)) - - return id_of - - def test_dependency_add_depended_list(self): - 
self._check_dependency_add_depended_list() - - def test_dependency_add_dependent_list(self): - self._check_dependency_add_dependent_list() - - def test_action_mark_succeeded(self): - timestamp = time.time() - id_of = self._check_dependency_add_dependent_list() - - db_api.action_mark_succeeded(self.ctx, id_of['A01'], timestamp) - - res = db_api.dependency_get_depended(self.ctx, id_of['A01']) - self.assertEqual(0, len(res)) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_SUCCEEDED, action.status) - self.assertEqual(round(timestamp, 6), float(action.end_time)) - - for aid in [id_of['A02'], id_of['A03'], id_of['A04']]: - res = db_api.dependency_get_dependents(self.ctx, aid) - self.assertEqual(0, len(res)) - - def _prepare_action_mark_failed_cancel(self): - specs = [ - {'name': 'A01', 'status': 'INIT', 'target': 'cluster_001'}, - {'name': 'A02', 'status': 'INIT', 'target': 'node_001'}, - {'name': 'A03', 'status': 'INIT', 'target': 'node_002', - 'inputs': {'update_parent_status': False}}, - {'name': 'A04', 'status': 'INIT', 'target': 'node_003'}, - {'name': 'A05', 'status': 'INIT', 'target': 'cluster_002'}, - {'name': 'A06', 'status': 'INIT', 'target': 'cluster_003'}, - {'name': 'A07', 'status': 'INIT', 'target': 'cluster_004'}, - ] - - id_of = {} - for spec in specs: - action = _create_action(self.ctx, **spec) - id_of[spec['name']] = action.id - - # A01 has dependents A02, A03, A04 - db_api.dependency_add(self.ctx, - [id_of['A02'], id_of['A03'], id_of['A04']], - id_of['A01']) - - # A05, A06, A07 each has dependent A01 - db_api.dependency_add(self.ctx, - id_of['A01'], - [id_of['A05'], id_of['A06'], id_of['A07']]) - - res = db_api.dependency_get_depended(self.ctx, id_of['A01']) - self.assertEqual(3, len(res)) - self.assertIn(id_of['A02'], res) - self.assertIn(id_of['A03'], res) - self.assertIn(id_of['A04'], res) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_WAITING, action.status) - - for aid in [id_of['A02'], id_of['A03'], id_of['A04']]: - res = db_api.dependency_get_dependents(self.ctx, aid) - self.assertEqual(1, len(res)) - self.assertIn(id_of['A01'], res) - res = db_api.dependency_get_depended(self.ctx, aid) - self.assertEqual(0, len(res)) - - res = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(3, len(res)) - self.assertIn(id_of['A05'], res) - self.assertIn(id_of['A06'], res) - self.assertIn(id_of['A07'], res) - - for aid in [id_of['A05'], id_of['A06'], id_of['A07']]: - res = db_api.dependency_get_depended(self.ctx, aid) - self.assertEqual(1, len(res)) - self.assertIn(id_of['A01'], res) - - res = db_api.dependency_get_dependents(self.ctx, aid) - self.assertEqual(0, len(res)) - - action = db_api.action_get(self.ctx, aid) - self.assertEqual(consts.ACTION_WAITING, action.status) - - return id_of - - def test_engine_mark_failed_with_depended(self): - timestamp = time.time() - id_of = self._prepare_action_mark_failed_cancel() - with db_api.session_for_write() as session: - db_api._mark_engine_failed(session, id_of['A01'], - timestamp, 'BOOM') - for aid in [id_of['A02'], id_of['A03'], id_of['A04']]: - action = db_api.action_get(self.ctx, aid) - self.assertEqual(consts.ACTION_FAILED, action.status) - self.assertEqual('BOOM', action.status_reason) - self.assertEqual(round(timestamp, 6), float(action.end_time)) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_FAILED, action.status) - self.assertEqual('BOOM', action.status_reason) - 
self.assertEqual(round(timestamp, 6), float(action.end_time)) - - for aid in [id_of['A02'], id_of['A03'], id_of['A04']]: - result = db_api.dependency_get_dependents(self.ctx, aid) - self.assertEqual(0, len(result)) - - def test_engine_mark_failed_without_depended(self): - timestamp = time.time() - id_of = self._prepare_action_mark_failed_cancel() - with db_api.session_for_write() as session: - db_api._mark_engine_failed(session, id_of['A02'], - timestamp, 'BOOM') - - for aid in [id_of['A03'], id_of['A04']]: - action = db_api.action_get(self.ctx, aid) - self.assertEqual(consts.ACTION_INIT, action.status) - self.assertNotEqual('BOOM', action.status_reason) - self.assertIsNone(action.end_time) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_WAITING, action.status) - self.assertNotEqual('BOOM', action.status_reason) - self.assertIsNone(action.end_time) - - action_d = db_api.action_get(self.ctx, id_of['A02']) - self.assertEqual(consts.ACTION_FAILED, action_d.status) - self.assertEqual('BOOM', action_d.status_reason) - self.assertEqual(round(timestamp, 6), float(action_d.end_time)) - - for aid in [id_of['A03'], id_of['A04']]: - result = db_api.dependency_get_dependents(self.ctx, aid) - self.assertEqual(1, len(result)) - result = db_api.dependency_get_dependents(self.ctx, id_of['A02']) - self.assertEqual(0, len(result)) - - def test_action_mark_failed(self): - timestamp = time.time() - id_of = self._prepare_action_mark_failed_cancel() - db_api.action_mark_failed(self.ctx, id_of['A01'], timestamp) - - for aid in [id_of['A05'], id_of['A06'], id_of['A07']]: - action = db_api.action_get(self.ctx, aid) - self.assertEqual(consts.ACTION_FAILED, action.status) - self.assertEqual(round(timestamp, 6), float(action.end_time)) - - result = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(0, len(result)) - - def test_action_mark_failed_parent_status_update_needed(self): - timestamp = time.time() - id_of = self._prepare_action_mark_failed_cancel() - db_api.action_mark_failed(self.ctx, id_of['A04'], timestamp) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_FAILED, action.status) - self.assertEqual(round(timestamp, 6), float(action.end_time)) - - result = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(0, len(result)) - - def test_action_mark_failed_parent_status_update_not_needed(self): - timestamp = time.time() - id_of = self._prepare_action_mark_failed_cancel() - db_api.action_mark_failed(self.ctx, id_of['A03'], timestamp) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_WAITING, action.status) - self.assertIsNone(action.end_time) - - result = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(3, len(result)) - - def test_action_mark_cancelled(self): - timestamp = time.time() - id_of = self._prepare_action_mark_failed_cancel() - db_api.action_mark_cancelled(self.ctx, id_of['A01'], timestamp) - - for aid in [id_of['A05'], id_of['A06'], id_of['A07']]: - action = db_api.action_get(self.ctx, aid) - self.assertEqual(consts.ACTION_CANCELLED, action.status) - self.assertEqual(round(timestamp, 6), float(action.end_time)) - - result = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(0, len(result)) - - def test_action_mark_cancelled_parent_status_update_needed(self): - timestamp = time.time() - id_of = self._prepare_action_mark_failed_cancel() - db_api.action_mark_cancelled(self.ctx, id_of['A04'], 
timestamp) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_CANCELLED, action.status) - self.assertEqual(round(timestamp, 6), float(action.end_time)) - - result = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(0, len(result)) - - def test_action_mark_cancelled_parent_status_update_not_needed(self): - timestamp = time.time() - id_of = self._prepare_action_mark_failed_cancel() - db_api.action_mark_cancelled(self.ctx, id_of['A03'], timestamp) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_WAITING, action.status) - self.assertIsNone(action.end_time) - - result = db_api.dependency_get_dependents(self.ctx, id_of['A01']) - self.assertEqual(3, len(result)) - - def test_action_mark_ready(self): - timestamp = time.time() - - specs = [ - {'name': 'A01', 'status': 'INIT', 'target': 'cluster_001'}, - {'name': 'A02', 'status': 'INIT', 'target': 'node_001'}, - {'name': 'A03', 'status': 'INIT', 'target': 'node_002'}, - {'name': 'A04', 'status': 'INIT', 'target': 'node_003'}, - {'name': 'A05', 'status': 'INIT', 'target': 'cluster_002'}, - {'name': 'A06', 'status': 'INIT', 'target': 'cluster_003'}, - {'name': 'A07', 'status': 'INIT', 'target': 'cluster_004'}, - ] - - id_of = {} - for spec in specs: - action = _create_action(self.ctx, **spec) - id_of[spec['name']] = action.id - - db_api.action_mark_ready(self.ctx, id_of['A01'], timestamp) - - action = db_api.action_get(self.ctx, id_of['A01']) - self.assertEqual(consts.ACTION_READY, action.status) - self.assertEqual(round(timestamp, 6), float(action.end_time)) - - def test_action_acquire(self): - action = _create_action(self.ctx) - db_api.action_update(self.ctx, action.id, {'status': 'READY'}) - timestamp = time.time() - action = db_api.action_acquire(self.ctx, action.id, 'worker1', - timestamp) - - self.assertEqual('worker1', action.owner) - self.assertEqual(consts.ACTION_RUNNING, action.status) - self.assertEqual(timestamp, action.start_time) - - action = db_api.action_acquire(self.ctx, action.id, 'worker2', - timestamp) - self.assertIsNone(action) - - def test_action_acquire_failed(self): - action = _create_action(self.ctx) - timestamp = time.time() - action = db_api.action_acquire(self.ctx, action.id, 'worker1', - timestamp) - self.assertIsNone(action) - - def test_action_delete(self): - action = _create_action(self.ctx) - self.assertIsNotNone(action) - res = db_api.action_delete(self.ctx, action.id) - self.assertIsNone(res) - - def test_action_delete_action_in_use(self): - for status in ('WAITING', 'RUNNING', 'SUSPENDED'): - action = _create_action(self.ctx, status=status) - self.assertIsNotNone(action) - ex = self.assertRaises(exception.EResourceBusy, - db_api.action_delete, - self.ctx, action.id) - self.assertEqual("The action '%s' is busy now." 
% action.id, - str(ex)) - - def test_action_delete_by_target(self): - for name in ['CLUSTER_CREATE', 'CLUSTER_RESIZE', 'CLUSTER_DELETE']: - action = _create_action(self.ctx, action=name, target='CLUSTER_ID') - self.assertIsNotNone(action) - action = _create_action(self.ctx, action=name, - target='CLUSTER_ID_2') - self.assertIsNotNone(action) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(6, len(actions)) - - db_api.action_delete_by_target(self.ctx, 'CLUSTER_ID') - actions = db_api.action_get_all(self.ctx) - self.assertEqual(3, len(actions)) - - def test_action_delete_by_target_with_action(self): - for name in ['CLUSTER_CREATE', 'CLUSTER_DELETE', 'CLUSTER_DELETE']: - action = _create_action(self.ctx, action=name, target='CLUSTER_ID') - self.assertIsNotNone(action) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(3, len(actions)) - - db_api.action_delete_by_target(self.ctx, 'CLUSTER_ID', - action=['CLUSTER_DELETE']) - actions = db_api.action_get_all(self.ctx) - self.assertEqual(1, len(actions)) - self.assertEqual('CLUSTER_CREATE', actions[0].action) - - def test_action_delete_by_target_with_action_excluded(self): - for name in ['CLUSTER_CREATE', 'CLUSTER_RESIZE', 'CLUSTER_DELETE']: - action = _create_action(self.ctx, action=name, target='CLUSTER_ID') - self.assertIsNotNone(action) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(3, len(actions)) - - db_api.action_delete_by_target(self.ctx, 'CLUSTER_ID', - action_excluded=['CLUSTER_DELETE']) - actions = db_api.action_get_all(self.ctx) - self.assertEqual(1, len(actions)) - self.assertEqual('CLUSTER_DELETE', actions[0].action) - - def test_action_delete_by_target_with_status(self): - action1 = _create_action(self.ctx, action='CLUSTER_CREATE', - target='CLUSTER_ID', status='SUCCEEDED') - action2 = _create_action(self.ctx, action='CLUSTER_DELETE', - target='CLUSTER_ID', status='INIT') - self.assertIsNotNone(action1) - self.assertIsNotNone(action2) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(2, len(actions)) - - db_api.action_delete_by_target(self.ctx, 'CLUSTER_ID', - status=['SUCCEEDED']) - actions = db_api.action_get_all(self.ctx) - self.assertEqual(1, len(actions)) - self.assertEqual('CLUSTER_DELETE', actions[0].action) - - def test_action_delete_by_target_both_specified(self): - for name in ['CLUSTER_CREATE', 'CLUSTER_RESIZE', 'CLUSTER_DELETE']: - action = _create_action(self.ctx, action=name, target='CLUSTER_ID') - self.assertIsNotNone(action) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(3, len(actions)) - - db_api.action_delete_by_target(self.ctx, 'CLUSTER_ID', - action=['CLUSTER_CREATE'], - action_excluded=['CLUSTER_DELETE']) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(3, len(actions)) - - def test_action_abandon(self): - spec = { - "owner": "test_owner", - "start_time": 14506893904.0 - } - action = _create_action(self.ctx, **spec) - - before_abandon = db_api.action_get(self.ctx, action.id) - self.assertEqual(spec['owner'], before_abandon.owner) - self.assertEqual(spec['start_time'], before_abandon.start_time) - self.assertIsNone(before_abandon.data) - - db_api.action_abandon(self.ctx, action.id, {}) - after_abandon = db_api.action_get(self.ctx, action.id) - - self.assertIsNone(after_abandon.owner) - self.assertIsNone(after_abandon.start_time) - self.assertEqual('The action was abandoned.', - after_abandon.status_reason) - self.assertEqual(consts.ACTION_READY, after_abandon.status) - self.assertIsNone(after_abandon.data) - - def 
test_action_abandon_with_params(self): - spec = { - "owner": "test_owner", - "start_time": 14506893904.0 - } - action = _create_action(self.ctx, **spec) - - before_abandon = db_api.action_get(self.ctx, action.id) - self.assertEqual(spec['owner'], before_abandon.owner) - self.assertEqual(spec['start_time'], before_abandon.start_time) - self.assertIsNone(before_abandon.data) - - db_api.action_abandon(self.ctx, action.id, - {'data': {'retries': 1}}) - after_abandon = db_api.action_get(self.ctx, action.id) - - self.assertIsNone(after_abandon.owner) - self.assertIsNone(after_abandon.start_time) - self.assertEqual('The action was abandoned.', - after_abandon.status_reason) - self.assertEqual(consts.ACTION_READY, after_abandon.status) - self.assertEqual({'retries': 1}, after_abandon.data) - - def test_action_purge(self): - old_timestamp = tu.utcnow(True) - datetime.timedelta(days=6) - spec = { - "owner": "test_owner", - "created_at": old_timestamp - } - _create_action(self.ctx, **spec) - _create_action(self.ctx, **spec) - _create_action(self.ctx, **spec) - - new_timestamp = tu.utcnow(True) - spec = { - "owner": "test_owner", - "created_at": new_timestamp - } - _create_action(self.ctx, **spec) - _create_action(self.ctx, **spec) - _create_action(self.ctx, **spec) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(6, len(actions)) - - db_api.action_purge(project=None, granularity='days', age=5) - - actions = db_api.action_get_all(self.ctx) - self.assertEqual(3, len(actions)) diff --git a/senlin/tests/unit/db/test_cluster_api.py b/senlin/tests/unit/db/test_cluster_api.py deleted file mode 100644 index 532fc1108..000000000 --- a/senlin/tests/unit/db/test_cluster_api.py +++ /dev/null @@ -1,508 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
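# Several lookups exercised below (cluster_get_by_short_id and friends)
# resolve a short ID prefix to at most one row and must raise when the
# prefix is ambiguous. A stdlib sketch of that contract; MultipleChoices and
# find_by_short_id here are local stand-ins for the sketch, not Senlin's
# DB API:

class MultipleChoices(Exception):
    pass


def find_by_short_id(rows, prefix):
    # Single match -> the row; no match -> None; several matches -> error.
    matches = [r for r in rows if r['id'].startswith(prefix)]
    if len(matches) > 1:
        raise MultipleChoices(prefix)
    return matches[0] if matches else None


rows = [{'id': 'same-part-unique-part'}, {'id': 'same-part-part-unique'}]
assert find_by_short_id(rows, 'same-part-u')['id'] == 'same-part-unique-part'
assert find_by_short_id(rows, 'nope') is None
try:
    find_by_short_id(rows, 'same-part-')
    raise AssertionError('expected MultipleChoices')
except MultipleChoices:
    pass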
- -from unittest import mock - -from oslo_db.sqlalchemy import utils as sa_utils -from oslo_utils import timeutils as tu - -from senlin.common import exception -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - -UUID1 = shared.UUID1 -UUID2 = shared.UUID2 -UUID3 = shared.UUID3 - - -class DBAPIClusterTest(base.SenlinTestCase): - def setUp(self): - super(DBAPIClusterTest, self).setUp() - self.ctx = utils.dummy_context() - self.profile = shared.create_profile(self.ctx) - - def test_cluster_create(self): - cluster = shared.create_cluster(self.ctx, self.profile) - self.assertIsNotNone(cluster.id) - self.assertEqual('db_test_cluster_name', cluster.name) - self.assertEqual(self.profile.id, cluster.profile_id) - self.assertEqual(self.ctx.user_id, cluster.user) - self.assertEqual(self.ctx.project_id, cluster.project) - self.assertEqual('unknown', cluster.domain) - self.assertIsNone(cluster.parent) - self.assertEqual(1, cluster.next_index) - self.assertEqual(60, cluster.timeout) - self.assertEqual(0, cluster.desired_capacity) - self.assertEqual('INIT', cluster.status) - self.assertEqual('Just Initialized', cluster.status_reason) - self.assertIsNone(cluster.created_at) - self.assertIsNone(cluster.updated_at) - self.assertIsNotNone(cluster.init_at) - self.assertEqual({}, cluster.meta_data) - self.assertIsNone(cluster.data) - self.assertEqual({}, cluster.config) - - def test_cluster_get_returns_a_cluster(self): - cluster = shared.create_cluster(self.ctx, self.profile) - ret_cluster = db_api.cluster_get(self.ctx, cluster.id) - self.assertIsNotNone(ret_cluster) - self.assertEqual(cluster.id, ret_cluster.id) - self.assertEqual('db_test_cluster_name', ret_cluster.name) - - def test_cluster_get_not_found(self): - cluster = db_api.cluster_get(self.ctx, UUID1) - self.assertIsNone(cluster) - - def test_cluster_get_from_different_project(self): - cluster = shared.create_cluster(self.ctx, self.profile) - self.ctx.project_id = 'abc' - ret_cluster = db_api.cluster_get(self.ctx, cluster.id, - project_safe=False) - self.assertEqual(cluster.id, ret_cluster.id) - self.assertEqual('db_test_cluster_name', ret_cluster.name) - - cluster = db_api.cluster_get(self.ctx, cluster.id) - self.assertIsNone(cluster) - - def test_cluster_get_with_admin_context(self): - cluster = shared.create_cluster(self.ctx, self.profile) - admin_ctx = utils.dummy_context(project='another-project', - is_admin=True) - ret_cluster = db_api.cluster_get(admin_ctx, cluster.id, - project_safe=True) - self.assertEqual(cluster.id, ret_cluster.id) - self.assertEqual('db_test_cluster_name', ret_cluster.name) - - ret_cluster = db_api.cluster_get(admin_ctx, cluster.id, - project_safe=False) - self.assertEqual(cluster.id, ret_cluster.id) - self.assertEqual('db_test_cluster_name', ret_cluster.name) - - def test_cluster_get_by_name(self): - cluster = shared.create_cluster(self.ctx, self.profile) - ret_cluster = db_api.cluster_get_by_name(self.ctx, cluster.name) - self.assertIsNotNone(ret_cluster) - self.assertEqual(cluster.id, ret_cluster.id) - self.assertEqual('db_test_cluster_name', ret_cluster.name) - - self.assertIsNone(db_api.cluster_get_by_name(self.ctx, 'abc')) - - self.ctx.project_id = 'abc' - self.assertIsNone(db_api.cluster_get_by_name(self.ctx, cluster.name)) - - def test_cluster_get_by_name_diff_project(self): - self.ctx.project_id = UUID2 - cluster1 = shared.create_cluster(self.ctx, self.profile, - name='cluster_A', - 
project=UUID2) - - shared.create_cluster(self.ctx, self.profile, name='cluster_B', - project=UUID2) - shared.create_cluster(self.ctx, self.profile, name='cluster_B', - project=UUID2) - - self.ctx.project_id = UUID1 - res = db_api.cluster_get_by_name(self.ctx, 'cluster_A') - self.assertIsNone(res) - - self.ctx.project_id = UUID3 - self.assertIsNone(db_api.cluster_get_by_name(self.ctx, - 'cluster_A')) - - self.ctx.project_id = UUID2 - res = db_api.cluster_get_by_name(self.ctx, 'cluster_A') - self.assertEqual(cluster1.id, res.id) - - self.assertRaises(exception.MultipleChoices, - db_api.cluster_get_by_name, - self.ctx, 'cluster_B') - - res = db_api.cluster_get_by_name(self.ctx, 'non-existent') - self.assertIsNone(res) - - def test_cluster_get_by_short_id(self): - cid1 = 'same-part-unique-part' - cid2 = 'same-part-part-unique' - cluster1 = shared.create_cluster(self.ctx, self.profile, - id=cid1, - name='cluster-1') - - cluster2 = shared.create_cluster(self.ctx, self.profile, - id=cid2, - name='cluster-2') - for x in range(len('same-part-')): - self.assertRaises(exception.MultipleChoices, - db_api.cluster_get_by_short_id, - self.ctx, cid1[:x]) - - res = db_api.cluster_get_by_short_id(self.ctx, cid1[:11]) - self.assertEqual(cluster1.id, res.id) - res = db_api.cluster_get_by_short_id(self.ctx, cid2[:11]) - self.assertEqual(cluster2.id, res.id) - res = db_api.cluster_get_by_short_id(self.ctx, 'non-existent') - self.assertIsNone(res) - - ctx_new = utils.dummy_context(project='different_project_id') - res = db_api.cluster_get_by_short_id(ctx_new, cid1[:11]) - self.assertIsNone(res) - - def test_cluster_get_by_short_id_diff_project(self): - cluster1 = shared.create_cluster(self.ctx, self.profile, - id=UUID1, - name='cluster-1') - - res = db_api.cluster_get_by_short_id(self.ctx, UUID1[:11]) - self.assertEqual(cluster1.id, res.id) - - ctx_new = utils.dummy_context(project='different_project_id') - res = db_api.cluster_get_by_short_id(ctx_new, UUID1[:11]) - self.assertIsNone(res) - - def test_cluster_get_all(self): - values = [ - {'name': 'cluster1'}, - {'name': 'cluster2'}, - {'name': 'cluster3'}, - {'name': 'cluster4'} - ] - [shared.create_cluster(self.ctx, self.profile, **v) for v in values] - - ret_clusters = db_api.cluster_get_all(self.ctx) - self.assertEqual(4, len(ret_clusters)) - names = [ret_cluster.name for ret_cluster in ret_clusters] - [self.assertIn(val['name'], names) for val in values] - - def test_cluster_get_all_with_regular_project(self): - values = [ - {'project': UUID1}, - {'project': UUID1}, - {'project': UUID2}, - {'project': UUID2}, - {'project': UUID2}, - ] - [shared.create_cluster(self.ctx, self.profile, **v) for v in values] - - self.ctx.project_id = UUID1 - clusters = db_api.cluster_get_all(self.ctx) - self.assertEqual(2, len(clusters)) - - self.ctx.project_id = UUID2 - clusters = db_api.cluster_get_all(self.ctx) - self.assertEqual(3, len(clusters)) - - self.ctx.project_id = UUID3 - self.assertEqual([], db_api.cluster_get_all(self.ctx)) - - def test_cluster_get_all_with_project_safe_false(self): - values = [ - {'project': UUID1}, - {'project': UUID1}, - {'project': UUID2}, - {'project': UUID2}, - {'project': UUID2}, - ] - [shared.create_cluster(self.ctx, self.profile, **v) for v in values] - - clusters = db_api.cluster_get_all(self.ctx, project_safe=False) - self.assertEqual(5, len(clusters)) - - def test_cluster_get_all_with_admin_context(self): - values = [ - {'project': UUID1}, - {'project': UUID1}, - {'project': UUID2}, - {'project': UUID2}, - {'project': UUID2}, - ] - 
[shared.create_cluster(self.ctx, self.profile, **v) for v in values] - - admin_ctx = utils.dummy_context(project='another-project', - is_admin=True) - clusters = db_api.cluster_get_all(admin_ctx, project_safe=True) - self.assertEqual(5, len(clusters)) - - clusters = db_api.cluster_get_all(admin_ctx, project_safe=False) - self.assertEqual(5, len(clusters)) - - def test_cluster_get_all_with_filters(self): - shared.create_cluster(self.ctx, self.profile, name='foo') - shared.create_cluster(self.ctx, self.profile, name='bar') - - filters = {'name': ['bar', 'quux']} - results = db_api.cluster_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('bar', results[0]['name']) - - filters = {'name': 'foo'} - results = db_api.cluster_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('foo', results[0]['name']) - - def test_cluster_get_all_returns_all_if_no_filters(self): - shared.create_cluster(self.ctx, self.profile) - shared.create_cluster(self.ctx, self.profile) - - filters = None - results = db_api.cluster_get_all(self.ctx, filters=filters) - - self.assertEqual(2, len(results)) - - def test_cluster_get_all_default_sort_dir(self): - clusters = [shared.create_cluster(self.ctx, self.profile, - init_at=tu.utcnow(True)) - for x in range(3)] - - st_db = db_api.cluster_get_all(self.ctx) - self.assertEqual(3, len(st_db)) - self.assertEqual(clusters[0].id, st_db[0].id) - self.assertEqual(clusters[1].id, st_db[1].id) - self.assertEqual(clusters[2].id, st_db[2].id) - - def test_cluster_get_all_str_sort_keys(self): - clusters = [shared.create_cluster(self.ctx, self.profile, - created_at=tu.utcnow(True)) - for x in range(3)] - - st_db = db_api.cluster_get_all(self.ctx, sort='created_at') - self.assertEqual(3, len(st_db)) - self.assertEqual(clusters[0].id, st_db[0].id) - self.assertEqual(clusters[1].id, st_db[1].id) - self.assertEqual(clusters[2].id, st_db[2].id) - - @mock.patch.object(sa_utils, 'paginate_query') - def test_cluster_get_all_filters_sort_keys(self, mock_paginate): - sort = 'name,status,created_at,updated_at' - db_api.cluster_get_all(self.ctx, sort=sort) - - args = mock_paginate.call_args[0] - used_sort_keys = set(args[3]) - expected_keys = set(['name', 'status', 'created_at', - 'updated_at', 'id']) - self.assertEqual(expected_keys, used_sort_keys) - - def test_cluster_get_all_marker(self): - clusters = [shared.create_cluster(self.ctx, self.profile, - created_at=tu.utcnow(True)) - for x in range(3)] - cl_db = db_api.cluster_get_all(self.ctx, marker=clusters[1].id) - self.assertEqual(1, len(cl_db)) - self.assertEqual(clusters[2].id, cl_db[0].id) - - def test_cluster_get_all_non_existing_marker(self): - [shared.create_cluster(self.ctx, self.profile) for x in range(3)] - uuid = "this cluster doesn't exist" - st_db = db_api.cluster_get_all(self.ctx, marker=uuid) - self.assertEqual(3, len(st_db)) - - def test_cluster_next_index(self): - cluster = shared.create_cluster(self.ctx, self.profile) - cluster_id = cluster.id - res = db_api.cluster_get(self.ctx, cluster_id) - self.assertEqual(1, res.next_index) - res = db_api.cluster_next_index(self.ctx, cluster_id) - self.assertEqual(1, res) - res = db_api.cluster_get(self.ctx, cluster_id) - self.assertEqual(2, res.next_index) - res = db_api.cluster_next_index(self.ctx, cluster_id) - self.assertEqual(2, res) - res = db_api.cluster_get(self.ctx, cluster_id) - self.assertEqual(3, res.next_index) - - def test_cluster_count_all(self): - clusters = [shared.create_cluster(self.ctx, 
self.profile) - for i in range(3)] - - cl_db = db_api.cluster_count_all(self.ctx) - self.assertEqual(3, cl_db) - - db_api.cluster_delete(self.ctx, clusters[0].id) - cl_db = db_api.cluster_count_all(self.ctx) - self.assertEqual(2, cl_db) - - db_api.cluster_delete(self.ctx, clusters[1].id) - cl_db = db_api.cluster_count_all(self.ctx) - self.assertEqual(1, cl_db) - - def test_cluster_count_all_with_regular_project(self): - values = [ - {'project': UUID1}, - {'project': UUID1}, - {'project': UUID2}, - {'project': UUID2}, - {'project': UUID2}, - ] - [shared.create_cluster(self.ctx, self.profile, **v) for v in values] - - self.ctx.project_id = UUID1 - self.assertEqual(2, db_api.cluster_count_all(self.ctx)) - - self.ctx.project_id = UUID2 - self.assertEqual(3, db_api.cluster_count_all(self.ctx)) - - def test_cluster_count_all_with_project_safe_false(self): - values = [ - {'project': UUID1}, - {'project': UUID1}, - {'project': UUID2}, - {'project': UUID2}, - {'project': UUID2}, - ] - [shared.create_cluster(self.ctx, self.profile, **v) for v in values] - - self.assertEqual(5, db_api.cluster_count_all(self.ctx, - project_safe=False)) - - def test_cluster_count_all_with_admin_context(self): - values = [ - {'project': UUID1}, - {'project': UUID1}, - {'project': UUID2}, - {'project': UUID2}, - {'project': UUID2}, - ] - [shared.create_cluster(self.ctx, self.profile, **v) for v in values] - - admin_ctx = utils.dummy_context(project='another-project', - is_admin=True) - self.assertEqual(5, db_api.cluster_count_all(admin_ctx, - project_safe=True)) - self.assertEqual(5, db_api.cluster_count_all(admin_ctx, - project_safe=False)) - - def test_cluster_count_all_with_filters(self): - shared.create_cluster(self.ctx, self.profile, name='foo') - shared.create_cluster(self.ctx, self.profile, name='bar') - shared.create_cluster(self.ctx, self.profile, name='bar') - filters = {'name': 'bar'} - - cl_db = db_api.cluster_count_all(self.ctx, filters=filters) - self.assertEqual(2, cl_db) - - def test_cluster_update(self): - cluster = shared.create_cluster(self.ctx, self.profile) - values = { - 'name': 'db_test_cluster_name2', - 'status': 'ERROR', - 'status_reason': "update failed", - 'timeout': 90, - } - db_api.cluster_update(self.ctx, cluster.id, values) - cluster = db_api.cluster_get(self.ctx, cluster.id) - self.assertEqual('db_test_cluster_name2', cluster.name) - self.assertEqual('ERROR', cluster.status) - self.assertEqual('update failed', cluster.status_reason) - self.assertEqual(90, cluster.timeout) - - self.assertRaises(exception.ResourceNotFound, - db_api.cluster_update, self.ctx, UUID2, values) - - def test_nested_cluster_get_by_name(self): - cluster1 = shared.create_cluster(self.ctx, self.profile, - name='cluster1') - cluster2 = shared.create_cluster(self.ctx, self.profile, - name='cluster2', - parent=cluster1.id) - - result = db_api.cluster_get_by_name(self.ctx, 'cluster2') - self.assertEqual(cluster2.id, result.id) - - db_api.cluster_delete(self.ctx, cluster2.id) - result = db_api.cluster_get_by_name(self.ctx, 'cluster2') - self.assertIsNone(result) - - def test_cluster_delete(self): - cluster = shared.create_cluster(self.ctx, self.profile) - cluster_id = cluster.id - node = shared.create_node(self.ctx, cluster, self.profile) - db_api.cluster_delete(self.ctx, cluster_id) - - self.assertIsNone(db_api.cluster_get(self.ctx, cluster_id)) - res = db_api.node_get(self.ctx, node.id) - self.assertIsNone(res) - self.assertRaises(exception.ResourceNotFound, db_api.cluster_delete, - self.ctx, cluster_id) - - # Testing 
child nodes deletion - res = db_api.node_get(self.ctx, node.id) - self.assertIsNone(res) - - def test_cluster_delete_policies_deleted(self): - # create cluster - cluster = shared.create_cluster(self.ctx, self.profile) - cluster_id = cluster.id - - # create policy - policy_data = { - 'name': 'test_policy', - 'type': 'ScalingPolicy', - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'spec': {'foo': 'bar'}, - 'data': None, - } - policy = db_api.policy_create(self.ctx, policy_data) - self.assertIsNotNone(policy) - - # attach policy - fields = { - 'enabled': True, - } - db_api.cluster_policy_attach(self.ctx, cluster_id, policy.id, fields) - binding = db_api.cluster_policy_get(self.ctx, cluster_id, policy.id) - self.assertIsNotNone(binding) - - # now we delete the cluster - db_api.cluster_delete(self.ctx, cluster_id) - - res = db_api.cluster_get(self.ctx, cluster_id) - self.assertIsNone(res) - - # we check the cluster-policy binding - binding = db_api.cluster_policy_get(self.ctx, cluster_id, policy.id) - self.assertIsNone(binding) - - # but the policy is not deleted - result = db_api.policy_get(self.ctx, policy.id) - self.assertIsNotNone(result) - - def test_cluster_add_dependents(self): - cluster = shared.create_cluster(self.ctx, self.profile) - profile_id = 'profile1' - db_api.cluster_add_dependents(self.ctx, cluster.id, profile_id) - res = db_api.cluster_get(self.ctx, cluster.id) - self.assertEqual(['profile1'], res.dependents['profiles']) - deps = {} - cluster = shared.create_cluster(self.ctx, self.profile, - dependents=deps) - db_api.cluster_add_dependents(self.ctx, cluster.id, profile_id) - res = db_api.cluster_get(self.ctx, cluster.id) - deps = {'profiles': ['profile1']} - self.assertEqual(deps, res.dependents) - db_api.cluster_add_dependents(self.ctx, cluster.id, 'profile2') - res = db_api.cluster_get(self.ctx, cluster.id) - deps = {'profiles': ['profile1', 'profile2']} - self.assertEqual(deps, res.dependents) - - def test_cluster_remove_dependents(self): - deps = {'profiles': ['profile1', 'profile2']} - cluster = shared.create_cluster(self.ctx, self.profile, - dependents=deps) - db_api.cluster_remove_dependents(self.ctx, cluster.id, 'profile1') - res = db_api.cluster_get(self.ctx, cluster.id) - deps = {'profiles': ['profile2']} - self.assertEqual(deps, res.dependents) - db_api.cluster_remove_dependents(self.ctx, cluster.id, 'profile2') - res = db_api.cluster_get(self.ctx, cluster.id) - deps = {} - self.assertEqual(deps, res.dependents) diff --git a/senlin/tests/unit/db/test_cluster_policy_api.py b/senlin/tests/unit/db/test_cluster_policy_api.py deleted file mode 100644 index e9baf833f..000000000 --- a/senlin/tests/unit/db/test_cluster_policy_api.py +++ /dev/null @@ -1,336 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
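For orientation before the binding tests below: the lifecycle asserted by test_cluster_delete_policies_deleted above can be condensed into a short sketch, where ctx, cluster and policy stand for objects created as in the tests:

    from senlin.db.sqlalchemy import api as db_api

    # Attaching creates a cluster-policy binding row.
    db_api.cluster_policy_attach(ctx, cluster.id, policy.id,
                                 {'enabled': True})
    assert db_api.cluster_policy_get(ctx, cluster.id, policy.id)

    # Deleting the cluster cascades to its nodes and to the binding,
    # while the policy object itself survives.
    db_api.cluster_delete(ctx, cluster.id)
    assert db_api.cluster_get(ctx, cluster.id) is None
    assert db_api.cluster_policy_get(ctx, cluster.id, policy.id) is None
    assert db_api.policy_get(ctx, policy.id) is not None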
- -from unittest import mock - -from oslo_db.sqlalchemy import utils as sa_utils -from oslo_utils import timeutils as tu - -from senlin.common import consts -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - - -class DBAPIClusterPolicyTest(base.SenlinTestCase): - def setUp(self): - super(DBAPIClusterPolicyTest, self).setUp() - self.ctx = utils.dummy_context() - self.profile = shared.create_profile(self.ctx) - self.cluster = shared.create_cluster(self.ctx, self.profile) - - def create_policy(self, **kwargs): - data = { - 'name': 'test_policy', - 'type': 'ScalingPolicy', - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - 'spec': { - 'min_size': 1, - 'max_size': 10, - 'paust_time': 'PT10M', - }, - 'data': None, - } - - data.update(kwargs) - return db_api.policy_create(self.ctx, data) - - def test_policy_attach_detach(self): - policy = self.create_policy() - - fields = { - 'enabled': True, - } - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, - fields) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertTrue(bindings[0].enabled) - - # This will succeed - db_api.cluster_policy_detach(self.ctx, self.cluster.id, policy.id) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(0, len(bindings)) - - # This will fail silently - res = db_api.cluster_policy_detach(self.ctx, self.cluster.id, 'BOGUS') - self.assertIsNone(res) - - def test_policy_enable_disable(self): - policy = self.create_policy() - - fields = { - 'enabled': True, - } - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, - fields) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertTrue(bindings[0].enabled) - - db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id, - {'enabled': True}) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertTrue(bindings[0].enabled) - - db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id, - {'enabled': False}) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertFalse(bindings[0].enabled) - - db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id, - {'enabled': True}) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertTrue(bindings[0].enabled) - - # No policy binding found - res = db_api.cluster_policy_update(self.ctx, self.cluster.id, 'BOGUS', - {}) - self.assertIsNone(res) - - def test_policy_update_with_data(self): - policy = self.create_policy() - - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, {}) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertIsNone(bindings[0].data) - - fields = {'data': {'foo': 'bar'}} - db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id, - fields) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertEqual({'foo': 'bar'}, bindings[0].data) - - fields = {'data': {'foo': 'BAR'}} - db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id, - fields) - bindings = 
db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertEqual({'foo': 'BAR'}, bindings[0].data) - - def test_policy_update_last_op(self): - policy = self.create_policy() - - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, {}) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertIsNone(bindings[0].last_op) - - timestamp = tu.utcnow(True) - fields = {'last_op': timestamp} - db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id, - fields) - bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id) - self.assertEqual(1, len(bindings)) - self.assertEqual(timestamp, bindings[0].last_op) - - def test_cluster_policy_get(self): - policy = self.create_policy() - - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, {}) - - binding = db_api.cluster_policy_get(self.ctx, self.cluster.id, - policy.id) - self.assertIsNotNone(binding) - self.assertEqual(self.cluster.id, binding.cluster_id) - self.assertEqual(policy.id, binding.policy_id) - - def test_policy_get_all_with_empty_filters(self): - for pid in ['policy1', 'policy2']: - self.create_policy(id=pid) - db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, {}) - - filters = None - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(2, len(results)) - - @mock.patch.object(sa_utils, 'paginate_query') - def test_policy_get_all_with_sort_key_are_used(self, mock_paginate): - values = { - 'policy1': {'enabled': True}, - 'policy2': {'enabled': True}, - 'policy3': {'enabled': True} - } - - # prepare - for key in values: - value = values[key] - policy_id = self.create_policy(id=key).id - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id, - value) - - sort = consts.CLUSTER_POLICY_SORT_KEYS - db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - sort=','.join(sort)) - - # Check sort_keys used - args = mock_paginate.call_args[0] - sort.append('id') - self.assertEqual(set(sort), set(args[3])) - - def test_policy_get_all_with_sorting(self): - values = { - 'policy1': {'enabled': True}, - 'policy2': {'enabled': True}, - 'policy3': {'enabled': False} - } - - # prepare - for key in values: - value = values[key] - policy_id = self.create_policy(id=key).id - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id, - value) - - # sorted by enabled, the 2nd and 3rd are unpredictable - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - sort='enabled') - self.assertEqual('policy3', results[0].policy_id) - - def test_policy_get_all_by_policy_type(self): - for pid in ['policy1', 'policy2']: - self.create_policy(id=pid) - db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, {}) - - results = db_api.cluster_policy_get_by_type(self.ctx, self.cluster.id, - 'ScalingPolicy') - self.assertEqual(2, len(results)) - - results = db_api.cluster_policy_get_by_type(self.ctx, self.cluster.id, - 'UnknownPolicy') - self.assertEqual(0, len(results)) - - def test_policy_get_all_by_policy_name(self): - for pid in ['policy1', 'policy2']: - self.create_policy(id=pid) - db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, {}) - - results = db_api.cluster_policy_get_by_name(self.ctx, self.cluster.id, - 'test_policy') - self.assertEqual(2, len(results)) - - results = db_api.cluster_policy_get_by_name(self.ctx, self.cluster.id, - 'unknown_policy') - self.assertEqual(0, len(results)) - - def 
test_policy_get_all_by_policy_type_with_filter(self): - for pid in ['policy1', 'policy2']: - self.create_policy(id=pid) - db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, - {'enabled': True}) - - filters = {'enabled': True} - results = db_api.cluster_policy_get_by_type(self.ctx, self.cluster.id, - 'ScalingPolicy', - filters=filters) - self.assertEqual(2, len(results)) - - filters = {'enabled': False} - results = db_api.cluster_policy_get_by_type(self.ctx, self.cluster.id, - 'ScalingPolicy', - filters=filters) - self.assertEqual(0, len(results)) - - def test_policy_get_all_by_policy_name_with_filter(self): - for pid in ['policy1', 'policy2']: - self.create_policy(id=pid) - db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, - {'enabled': True}) - - filters = {'enabled': True} - results = db_api.cluster_policy_get_by_name(self.ctx, self.cluster.id, - 'test_policy', - filters=filters) - self.assertEqual(2, len(results)) - - filters = {'enabled': False} - results = db_api.cluster_policy_get_by_name(self.ctx, self.cluster.id, - 'test_policy', - filters=filters) - self.assertEqual(0, len(results)) - - def test_policy_get_all_with_all_filters(self): - for pid in ['policy1', 'policy2']: - self.create_policy(id=pid) - db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, - {'enabled': True}) - - filters = {'enabled': True, - 'policy_name': 'test_policy', - 'policy_type': 'ScalingPolicy'} - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(2, len(results)) - - filters = {'enabled': True, - 'policy_type': 'ScalingPolicy'} - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(2, len(results)) - - filters = {'enabled': True, - 'policy_name': 'test_policy'} - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(2, len(results)) - - filters = {'enabled': True, - 'policy_name': 'wrong_name', - 'policy_type': 'wrong_type'} - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(0, len(results)) - - filters = {'enabled': True, - 'policy_name': 'wrong_name', - 'policy_type': 'ScalingPolicy'} - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(0, len(results)) - - filters = {'policy_name': 'test_policy'} - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(2, len(results)) - - filters = {'policy_type': 'ScalingPolicy'} - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(2, len(results)) - - filters = {'enabled': False} - results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id, - filters=filters) - self.assertEqual(0, len(results)) - - def test_cluster_policy_ids_by_cluster(self): - # prepare - ids = [] - for i in range(3): - policy_id = self.create_policy().id - ids.append(policy_id) - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id, - {'enabled': True}) - - # sorted by enabled, the 2nd and 3rd are unpredictable - results = db_api.cluster_policy_ids_by_cluster(self.ctx, - self.cluster.id) - self.assertEqual(set(ids), set(results)) diff --git a/senlin/tests/unit/db/test_cred_api.py b/senlin/tests/unit/db/test_cred_api.py deleted file mode 100644 index 1f7d335ce..000000000 --- a/senlin/tests/unit/db/test_cred_api.py +++ /dev/null @@ -1,105 +0,0 @@ -# Licensed under the Apache License, 
Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - -USER_ID = shared.UUID1 -PROJECT_ID = '26e4df6952b144e5823aae7ce463a240' -values = { - 'user': USER_ID, - 'project': PROJECT_ID, - 'cred': { - 'openstack': { - 'trust': '01234567890123456789012345678901', - }, - }, - 'data': {} -} - - -class DBAPICredentialTest(base.SenlinTestCase): - - def setUp(self): - super(DBAPICredentialTest, self).setUp() - self.ctx = utils.dummy_context() - - def test_cred_create(self): - cred = db_api.cred_create(self.ctx, values) - self.assertIsNotNone(cred) - self.assertEqual(USER_ID, cred.user) - self.assertEqual(PROJECT_ID, cred.project) - self.assertEqual( - {'openstack': {'trust': '01234567890123456789012345678901'}}, - cred.cred) - self.assertEqual({}, cred.data) - - def test_cred_get(self): - cred = db_api.cred_get(self.ctx, USER_ID, PROJECT_ID) - self.assertIsNone(cred) - - db_api.cred_create(self.ctx, values) - - cred = db_api.cred_get(self.ctx, USER_ID, PROJECT_ID) - self.assertIsNotNone(cred) - self.assertEqual(USER_ID, cred.user) - self.assertEqual(PROJECT_ID, cred.project) - self.assertEqual( - {'openstack': {'trust': '01234567890123456789012345678901'}}, - cred.cred) - self.assertEqual({}, cred.data) - - def test_cred_update(self): - db_api.cred_create(self.ctx, values) - new_values = { - 'cred': { - 'openstack': { - 'trust': 'newtrust' - } - } - } - db_api.cred_update(self.ctx, USER_ID, PROJECT_ID, new_values) - cred = db_api.cred_get(self.ctx, USER_ID, PROJECT_ID) - self.assertIsNotNone(cred) - self.assertEqual({'openstack': {'trust': 'newtrust'}}, - cred.cred) - - def test_cred_delete(self): - cred = db_api.cred_delete(self.ctx, USER_ID, PROJECT_ID) - self.assertIsNone(cred) - - db_api.cred_create(self.ctx, values) - cred = db_api.cred_delete(self.ctx, USER_ID, PROJECT_ID) - self.assertIsNone(cred) - - def test_cred_create_update(self): - cred = db_api.cred_create_update(self.ctx, values) - self.assertIsNotNone(cred) - self.assertEqual(USER_ID, cred.user) - self.assertEqual(PROJECT_ID, cred.project) - self.assertEqual( - {'openstack': {'trust': '01234567890123456789012345678901'}}, - cred.cred) - self.assertEqual({}, cred.data) - - new_values = copy.deepcopy(values) - new_values['cred']['openstack']['trust'] = 'newtrust' - cred = db_api.cred_create_update(self.ctx, new_values) - self.assertEqual(USER_ID, cred.user) - self.assertEqual(PROJECT_ID, cred.project) - self.assertEqual( - {'openstack': {'trust': 'newtrust'}}, - cred.cred) diff --git a/senlin/tests/unit/db/test_event_api.py b/senlin/tests/unit/db/test_event_api.py deleted file mode 100644 index a6445b1c8..000000000 --- a/senlin/tests/unit/db/test_event_api.py +++ /dev/null @@ -1,552 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from oslo_utils import reflection -from oslo_utils import timeutils as tu - -from senlin.common import utils as common_utils -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - -UUID1 = shared.UUID1 -UUID2 = shared.UUID2 -UUID3 = shared.UUID3 - - -class DBAPIEventTest(base.SenlinTestCase): - - def setUp(self): - super(DBAPIEventTest, self).setUp() - self.ctx = utils.dummy_context() - self.profile = shared.create_profile(self.ctx) - - def create_event(self, ctx, timestamp=None, level=logging.INFO, - entity=None, action=None, status=None, - status_reason=None): - - fake_timestamp = tu.parse_strtime( - '2014-12-19 11:51:54.670244', '%Y-%m-%d %H:%M:%S.%f') - - if entity: - e_name = reflection.get_class_name(entity, fully_qualified=False) - type_name = e_name.upper() - if type_name == 'CLUSTER': - cluster_id = entity.id - elif type_name == 'NODE': - cluster_id = entity.cluster_id - else: - cluster_id = '' - else: - type_name = '' - cluster_id = '' - - values = { - 'timestamp': timestamp or fake_timestamp, - 'level': level, - 'oid': entity.id if entity else '', - 'oname': entity.name if entity else '', - 'otype': type_name, - 'cluster_id': cluster_id, - 'action': action or '', - 'status': status or '', - 'status_reason': status_reason or '', - 'user': ctx.user_id, - 'project': ctx.project_id, - } - - # Make sure all fields can be customized - return db_api.event_create(ctx, values) - - def test_event_create_get(self): - event = self.create_event(self.ctx) - ret_event = db_api.event_get(self.ctx, event.id) - self.assertIsNotNone(ret_event) - tst_timestamp = tu.parse_strtime('2014-12-19 11:51:54.670244', - '%Y-%m-%d %H:%M:%S.%f') - - self.assertEqual(common_utils.isotime(tst_timestamp), - common_utils.isotime(ret_event.timestamp)) - self.assertEqual('20', ret_event.level) - self.assertEqual('', ret_event.oid) - self.assertEqual('', ret_event.otype) - self.assertEqual('', ret_event.oname) - self.assertEqual('', ret_event.action) - self.assertEqual('', ret_event.status) - self.assertEqual('', ret_event.status_reason) - self.assertEqual(self.ctx.user_id, ret_event.user) - self.assertEqual(self.ctx.project_id, ret_event.project) - - def test_event_get_diff_project(self): - event = self.create_event(self.ctx) - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.event_get(new_ctx, event.id) - self.assertIsNone(res) - res = db_api.event_get(new_ctx, event.id, project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(event.id, res.id) - - def test_event_get_admin_context(self): - event = self.create_event(self.ctx) - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - res = db_api.event_get(admin_ctx, event.id, project_safe=True) - self.assertIsNotNone(res) - res = db_api.event_get(admin_ctx, event.id, project_safe=False) - self.assertIsNotNone(res) - - def test_event_get_by_short_id(self): - event = self.create_event(self.ctx) - short_id = event.id[:6] - ret_event = 
db_api.event_get_by_short_id(self.ctx, short_id) - self.assertIsNotNone(ret_event) - - short_id = event.id[:8] - ret_event = db_api.event_get_by_short_id(self.ctx, short_id) - self.assertIsNotNone(ret_event) - - ret_event = db_api.event_get_by_short_id(self.ctx, 'non-existent') - self.assertIsNone(ret_event) - - def test_event_get_by_short_id_diff_project(self): - event = self.create_event(self.ctx) - - new_ctx = utils.dummy_context(project='a-different-project') - short_id = event.id[:8] - res = db_api.event_get_by_short_id(new_ctx, short_id) - self.assertIsNone(res) - res = db_api.event_get_by_short_id(new_ctx, short_id, - project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(event.id, res.id) - - def test_event_get_all(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - cluster2 = shared.create_cluster(self.ctx, self.profile) - - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster2) - - # Default project_safe - events = db_api.event_get_all(self.ctx) - self.assertEqual(3, len(events)) - - cluster_ids = [event.oid for event in events] - onames = [event.oname for event in events] - - self.assertIn(cluster1.id, cluster_ids) - self.assertIn(cluster1.name, onames) - self.assertIn(cluster2.id, cluster_ids) - self.assertIn(cluster2.name, onames) - - def test_event_get_all_with_limit(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster1) - - events = db_api.event_get_all(self.ctx) - self.assertEqual(3, len(events)) - - events = db_api.event_get_all(self.ctx, limit=1) - self.assertEqual(1, len(events)) - - events = db_api.event_get_all(self.ctx, limit=2) - self.assertEqual(2, len(events)) - - def test_event_get_all_with_limit_and_marker(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster1) - - events_all = db_api.event_get_all(self.ctx) - self.assertEqual(3, len(events_all)) - - marker = events_all[0].id - event1_id = events_all[1].id - event2_id = events_all[2].id - events = db_api.event_get_all(self.ctx, limit=1, marker=marker) - self.assertEqual(1, len(events)) - self.assertEqual(event1_id, events[0].id) - - events = db_api.event_get_all(self.ctx, limit=2, marker=marker) - self.assertEqual(2, len(events)) - self.assertEqual(event1_id, events[0].id) - self.assertEqual(event2_id, events[1].id) - - marker = event1_id - events = db_api.event_get_all(self.ctx, limit=1, marker=marker) - self.assertEqual(1, len(events)) - self.assertEqual(event2_id, events[0].id) - - def test_event_get_all_with_sorting(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - - event1 = self.create_event(self.ctx, entity=cluster1, - timestamp=tu.utcnow(True), - action='action2') - event2 = self.create_event(self.ctx, entity=cluster1, - timestamp=tu.utcnow(True), - action='action3') - event3 = self.create_event(self.ctx, entity=cluster1, - timestamp=tu.utcnow(True), - action='action1') - - events = db_api.event_get_all(self.ctx, sort='timestamp') - self.assertEqual(event1.id, events[0].id) - self.assertEqual(event2.id, events[1].id) - self.assertEqual(event3.id, events[2].id) - - events = db_api.event_get_all(self.ctx, sort='timestamp:desc') - self.assertEqual(event1.id, events[2].id) - 
self.assertEqual(event2.id, events[1].id) - self.assertEqual(event3.id, events[0].id) - - events = db_api.event_get_all(self.ctx, sort='action') - self.assertEqual(event1.id, events[1].id) - self.assertEqual(event2.id, events[2].id) - self.assertEqual(event3.id, events[0].id) - - events = db_api.event_get_all(self.ctx, sort='action:desc') - self.assertEqual(event1.id, events[1].id) - self.assertEqual(event2.id, events[0].id) - self.assertEqual(event3.id, events[2].id) - - def test_event_get_all_project_safe(self): - self.ctx.project_id = 'project_1' - cluster1 = shared.create_cluster(self.ctx, self.profile, - name='cluster1') - self.create_event(self.ctx, entity=cluster1) - self.ctx.project_id = 'project_2' - cluster2 = shared.create_cluster(self.ctx, self.profile, - name='cluster2') - self.create_event(self.ctx, entity=cluster2, action='CLUSTER_CREATE') - self.create_event(self.ctx, entity=cluster2, action='CLUSTER_DELETE') - - # Default project_safe to true, only the last two events are visible - events = db_api.event_get_all(self.ctx) - self.assertEqual(2, len(events)) - - oids = [event.oid for event in events] - onames = [event.oname for event in events] - self.assertNotIn(cluster1.id, oids) - self.assertNotIn(cluster1.name, onames) - self.assertIn(cluster2.id, oids) - self.assertIn(cluster2.name, onames) - - # Set project_safe to false, we should get all three events - events = db_api.event_get_all(self.ctx, project_safe=False) - self.assertEqual(3, len(events)) - - oids = [event.oid for event in events] - onames = [event.oname for event in events] - self.assertIn(cluster1.id, oids) - self.assertIn(cluster1.name, onames) - self.assertIn(cluster2.id, oids) - self.assertIn(cluster2.name, onames) - - def test_event_get_all_admin_context(self): - self.ctx.project_id = 'project_1' - cluster1 = shared.create_cluster(self.ctx, self.profile, - name='cluster1') - self.create_event(self.ctx, entity=cluster1) - self.ctx.project_id = 'project_2' - cluster2 = shared.create_cluster(self.ctx, self.profile, - name='cluster2') - self.create_event(self.ctx, entity=cluster2, action='CLUSTER_CREATE') - self.create_event(self.ctx, entity=cluster2, action='CLUSTER_DELETE') - - admin_ctx = utils.dummy_context(project='another-project', - is_admin=True) - events = db_api.event_get_all(admin_ctx, project_safe=True) - self.assertEqual(3, len(events)) - - events = db_api.event_get_all(admin_ctx, project_safe=False) - self.assertEqual(3, len(events)) - - def test_event_get_all_by_cluster(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - cluster2 = shared.create_cluster(self.ctx, self.profile) - node1_1 = shared.create_node(self.ctx, cluster1, self.profile) - node1_2 = shared.create_node(self.ctx, cluster1, self.profile) - node2_1 = shared.create_node(self.ctx, cluster2, self.profile) - node_orphan = shared.create_node(self.ctx, None, self.profile) - - # 1 event for cluster 1 - self.create_event(self.ctx, entity=cluster1) - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(1, len(events)) - events = db_api.event_get_all_by_cluster(self.ctx, cluster2.id) - self.assertEqual(0, len(events)) - - # two more events for cluster 1, with one for an orphan node - self.create_event(self.ctx, entity=node1_1) - self.create_event(self.ctx, entity=node1_2) - self.create_event(self.ctx, entity=node_orphan) - - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(3, len(events)) - events = db_api.event_get_all_by_cluster(self.ctx, cluster2.id) - 
self.assertEqual(0, len(events)) - - # one more events for cluster 2, with one for an orphan node - self.create_event(self.ctx, entity=cluster2) - self.create_event(self.ctx, entity=node_orphan) - - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(3, len(events)) - events = db_api.event_get_all_by_cluster(self.ctx, cluster2.id) - self.assertEqual(1, len(events)) - - # two more events for cluster 2, with one for an orphan node - self.create_event(self.ctx, entity=node2_1) - self.create_event(self.ctx, entity=node2_1) - self.create_event(self.ctx, entity=node_orphan) - - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(3, len(events)) - events = db_api.event_get_all_by_cluster(self.ctx, cluster2.id) - self.assertEqual(3, len(events)) - - # two more events for cluster 1, with one for an orphan node - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=node1_1) - self.create_event(self.ctx, entity=node_orphan) - - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(5, len(events)) - events = db_api.event_get_all_by_cluster(self.ctx, cluster2.id) - self.assertEqual(3, len(events)) - - def test_event_get_all_by_cluster_diff_project(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - cluster2 = shared.create_cluster(self.ctx, self.profile) - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster2) - - new_ctx = utils.dummy_context(project='a-different-project') - events = db_api.event_get_all_by_cluster(new_ctx, cluster1.id) - self.assertEqual(0, len(events)) - events = db_api.event_get_all_by_cluster(new_ctx, cluster1.id, - project_safe=False) - self.assertEqual(1, len(events)) - - def test_event_get_all_by_cluster_admin_context(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster1) - - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - events = db_api.event_get_all_by_cluster(admin_ctx, cluster1.id, - project_safe=True) - self.assertEqual(2, len(events)) - events = db_api.event_get_all_by_cluster(admin_ctx, cluster1.id, - project_safe=False) - self.assertEqual(2, len(events)) - - def test_event_count_all_by_cluster(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - cluster2 = shared.create_cluster(self.ctx, self.profile) - node1_1 = shared.create_node(self.ctx, cluster1, self.profile) - node_orphan = shared.create_node(self.ctx, None, self.profile) - - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster1) - - self.assertEqual(2, db_api.event_count_by_cluster(self.ctx, - cluster1.id)) - self.assertEqual(0, db_api.event_count_by_cluster(self.ctx, - cluster2.id)) - - # No change if event is not related to a cluster - self.create_event(self.ctx, entity=self.profile) - - self.assertEqual(2, db_api.event_count_by_cluster(self.ctx, - cluster1.id)) - self.assertEqual(0, db_api.event_count_by_cluster(self.ctx, - cluster2.id)) - - # Node level events account to cluster - self.create_event(self.ctx, entity=node1_1) - self.assertEqual(3, db_api.event_count_by_cluster(self.ctx, - cluster1.id)) - self.assertEqual(0, db_api.event_count_by_cluster(self.ctx, - cluster2.id)) - - # Node level events account to cluster, but not for orphan nodes - self.create_event(self.ctx, entity=node_orphan) - self.assertEqual(3, 
db_api.event_count_by_cluster(self.ctx, - cluster1.id)) - self.assertEqual(0, db_api.event_count_by_cluster(self.ctx, - cluster2.id)) - # Another cluster - self.create_event(self.ctx, entity=cluster2) - self.assertEqual(3, db_api.event_count_by_cluster(self.ctx, - cluster1.id)) - self.assertEqual(1, db_api.event_count_by_cluster(self.ctx, - cluster2.id)) - - def test_event_count_all_by_cluster_diff_project(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - cluster2 = shared.create_cluster(self.ctx, self.profile) - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster2) - - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.event_count_by_cluster(new_ctx, cluster1.id) - self.assertEqual(0, res) - res = db_api.event_count_by_cluster(new_ctx, cluster1.id, - project_safe=False) - self.assertEqual(1, res) - - def test_event_count_all_by_cluster_admin_context(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - self.create_event(self.ctx, entity=cluster1) - - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - - res = db_api.event_count_by_cluster(admin_ctx, cluster1.id, - project_safe=True) - self.assertEqual(1, res) - - res = db_api.event_count_by_cluster(admin_ctx, cluster1.id, - project_safe=False) - self.assertEqual(1, res) - - def test_event_get_all_filtered(self): - cluster1 = shared.create_cluster(self.ctx, self.profile, - name='cluster1') - cluster2 = shared.create_cluster(self.ctx, self.profile, - name='cluster2') - - self.create_event(self.ctx, entity=cluster1, action='CLUSTER_CREATE') - self.create_event(self.ctx, entity=cluster1, action='CLUSTER_DELETE') - self.create_event(self.ctx, entity=cluster2, action='CLUSTER_CREATE') - - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(2, len(events)) - - # test filter by action - filters = {'action': 'CLUSTER_CREATE'} - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id, - filters=filters) - self.assertEqual(1, len(events)) - self.assertEqual('CLUSTER_CREATE', events[0].action) - - filters = {'action': 'CLUSTER_UPDATE'} - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id, - filters=filters) - self.assertEqual(0, len(events)) - - # test filter by oname - filters = {'oname': 'cluster1'} - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id, - filters=filters) - self.assertEqual(2, len(events)) - self.assertEqual('cluster1', events[0].oname) - self.assertEqual('cluster1', events[1].oname) - - filters = {'oname': 'cluster3'} - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id, - filters=filters) - self.assertEqual(0, len(events)) - - # test filter by otype - filters = {'otype': 'CLUSTER'} - events = db_api.event_get_all_by_cluster(self.ctx, cluster2.id, - filters=filters) - self.assertEqual(1, len(events)) - self.assertEqual('CLUSTER', events[0].otype) - - filters = {'otype': 'NODE'} - events = db_api.event_get_all_by_cluster(self.ctx, cluster2.id, - filters=filters) - self.assertEqual(0, len(events)) - - # test limit and marker - events_all = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - marker = events_all[0].id - expected = events_all[1].id - events = db_api.event_get_all_by_cluster(self.ctx, cluster1.id, - limit=1, marker=marker) - self.assertEqual(1, len(events)) - self.assertEqual(expected, events[0].id) - - def test_event_prune(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - cluster2 = 
shared.create_cluster(self.ctx, self.profile) - node1_1 = shared.create_node(self.ctx, cluster1, self.profile) - node_orphan = shared.create_node(self.ctx, None, self.profile) - - # prune 1: cluster events - self.create_event(self.ctx, entity=cluster1) - self.create_event(self.ctx, entity=cluster1) - - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(2, len(res)) - db_api.event_prune(self.ctx, cluster1.id) - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(0, len(res)) - - # prune 2: Node level events account to cluster - self.create_event(self.ctx, entity=node1_1) - - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(1, len(res)) - db_api.event_prune(self.ctx, cluster1.id) - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(0, len(res)) - - # prune 3: Events related to orphan nodes - # no impact here and no error given - self.create_event(self.ctx, entity=node_orphan) - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(0, len(res)) - db_api.event_prune(self.ctx, cluster1.id) - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(0, len(res)) - - # prune 4: Another cluster - # no impact here and no error given - self.create_event(self.ctx, entity=cluster2) - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(0, len(res)) - db_api.event_prune(self.ctx, cluster1.id) - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(0, len(res)) - - def test_event_purge(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - node1_1 = shared.create_node(self.ctx, cluster1, self.profile) - node1_2 = shared.create_node(self.ctx, cluster1, self.profile) - - self.create_event(self.ctx, entity=cluster1, status='start') - self.create_event(self.ctx, entity=cluster1, status='end') - self.create_event(self.ctx, entity=node1_1, status='start') - self.create_event(self.ctx, entity=node1_1, status='end') - timestamp = tu.utcnow() - self.create_event(self.ctx, timestamp=timestamp, - entity=node1_2, status='start') - - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(5, len(res)) - db_api.event_purge(project=None, granularity='days', age=5) - res = db_api.event_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(1, len(res)) diff --git a/senlin/tests/unit/db/test_lock_api.py b/senlin/tests/unit/db/test_lock_api.py deleted file mode 100644 index 83cb19122..000000000 --- a/senlin/tests/unit/db/test_lock_api.py +++ /dev/null @@ -1,555 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
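The lock tests that follow all exercise one contract: scope -1 requests an exclusive cluster-wide lock, a positive scope a shared node-level lock; cluster_lock_acquire returns the list of current holders (hence the assertIn checks) and cluster_lock_release returns a boolean. A self-contained toy model of that contract, purely illustrative rather than the real SQLAlchemy implementation (it also drops the scope argument on release for brevity):

    class ToyClusterLock:
        # In-memory stand-in for the cluster_lock_* semantics.

        def __init__(self):
            self.holders = []       # action IDs currently holding the lock
            self.exclusive = False  # True when held cluster-wide

        def acquire(self, action_id, scope):
            if scope < 0:
                # Exclusive request: grantable only when nothing is held.
                if not self.holders:
                    self.holders = [action_id]
                    self.exclusive = True
            elif not self.exclusive and action_id not in self.holders:
                # Shared request: grantable unless exclusively held;
                # re-acquiring by the same holder is a no-op.
                self.holders.append(action_id)
            return list(self.holders)

        def release(self, action_id):
            if action_id not in self.holders:
                return False
            self.holders.remove(action_id)
            if not self.holders:
                self.exclusive = False
            return True

    lock = ToyClusterLock()
    assert 'A1' in lock.acquire('A1', -1)      # exclusive granted
    assert 'A2' not in lock.acquire('A2', 1)   # blocked by the exclusive
    assert lock.release('A2') is False
    assert lock.release('A1') is True
    assert 'A2' in lock.acquire('A2', 1)       # shared lock now grantable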
- -from oslo_utils import uuidutils -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - -UUID1 = shared.UUID1 -UUID2 = shared.UUID2 -UUID3 = shared.UUID3 - - -class DBAPILockTest(base.SenlinTestCase): - def setUp(self): - super(DBAPILockTest, self).setUp() - self.ctx = utils.dummy_context() - self.profile = shared.create_profile(self.ctx) - self.cluster = shared.create_cluster(self.ctx, self.profile) - self.node = shared.create_node(self.ctx, self.cluster, self.profile) - - def test_cluster_lock_cluster_scope(self): - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID1, -1) - self.assertIn(UUID1, observed) - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, -1) - self.assertNotIn(UUID2, observed) - observed = db_api.cluster_lock_release(self.cluster.id, UUID2, -1) - self.assertFalse(observed) - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, -1) - self.assertTrue(observed) - - def test_cluster_lock_node_scope(self): - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID1, 1) - self.assertIn(UUID1, observed) - self.assertNotIn(UUID2, observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, 1) - self.assertIn(UUID1, observed) - self.assertIn(UUID2, observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, 1) - self.assertIn(UUID1, observed) - self.assertIn(UUID2, observed) - self.assertEqual(2, len(observed)) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, 1) - self.assertTrue(observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, 1) - self.assertFalse(observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID3, 1) - self.assertFalse(observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID2, 1) - self.assertTrue(observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID2, 1) - self.assertFalse(observed) - - def test_cluster_lock_cluster_lock_first(self): - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID1, -1) - self.assertIn(UUID1, observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, -1) - self.assertNotIn(UUID2, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID2, -1) - self.assertFalse(observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, 1) - self.assertNotIn(UUID3, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID3, 1) - self.assertFalse(observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, -1) - self.assertTrue(observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, -1) - self.assertIn(UUID2, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID2, -1) - self.assertTrue(observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, 1) - self.assertIn(UUID3, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID3, 1) - self.assertTrue(observed) - - def test_cluster_lock_node_lock_first(self): - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID1, 1) - self.assertIn(UUID1, observed) - self.assertNotIn(UUID2, observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, 1) - self.assertIn(UUID1, observed) - self.assertIn(UUID2, observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, -1) - self.assertIn(UUID1, observed) - self.assertIn(UUID2, 
observed) - self.assertNotIn(UUID3, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID3, -1) - self.assertFalse(observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, 1) - self.assertTrue(observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, -1) - self.assertNotIn(UUID1, observed) - self.assertIn(UUID2, observed) - self.assertNotIn(UUID3, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID3, -1) - self.assertFalse(observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID2, 1) - self.assertTrue(observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, 1) - self.assertFalse(observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, -1) - self.assertIn(UUID3, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID3, -1) - self.assertTrue(observed) - - def test_cluster_lock_steal(self): - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID1, -1) - self.assertIn(UUID1, observed) - self.assertNotIn(UUID2, observed) - - observed = db_api.cluster_lock_steal(self.cluster.id, UUID1) - self.assertIn(UUID1, observed) - self.assertNotIn(UUID2, observed) - - observed = db_api.cluster_lock_steal(self.cluster.id, UUID2) - self.assertNotIn(UUID1, observed) - self.assertIn(UUID2, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID2, -1) - self.assertTrue(observed) - - observed = db_api.cluster_lock_steal(self.cluster.id, UUID1) - self.assertIn(UUID1, observed) - self.assertNotIn(UUID2, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, -1) - self.assertTrue(observed) - - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, 1) - self.assertIn(UUID3, observed) - self.assertNotIn(UUID1, observed) - self.assertNotIn(UUID2, observed) - - observed = db_api.cluster_lock_steal(self.cluster.id, UUID1) - self.assertIn(UUID1, observed) - self.assertNotIn(UUID3, observed) - - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, -1) - self.assertTrue(observed) - - def test_cluster_is_locked(self): - # newly created cluster should not be locked - observed = db_api.cluster_is_locked(self.cluster.id) - self.assertFalse(observed) - - # lock cluster - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID1, -1) - self.assertIn(UUID1, observed) - - # cluster should be locked - observed = db_api.cluster_is_locked(self.cluster.id) - self.assertTrue(observed) - - # release cluster lock - observed = db_api.cluster_lock_release(self.cluster.id, UUID1, -1) - self.assertTrue(observed) - - # cluster should not be locked anymore - observed = db_api.cluster_is_locked(self.cluster.id) - self.assertFalse(observed) - - def test_node_lock_acquire_release(self): - observed = db_api.node_lock_acquire(self.node.id, UUID1) - self.assertEqual(UUID1, observed) - - observed = db_api.node_lock_acquire(self.node.id, UUID2) - self.assertEqual(UUID1, observed) - - observed = db_api.node_lock_release(self.node.id, UUID2) - self.assertFalse(observed) - - observed = db_api.node_lock_release(self.node.id, UUID1) - self.assertTrue(observed) - - observed = db_api.node_lock_release(self.node.id, UUID1) - self.assertFalse(observed) - - observed = db_api.node_lock_acquire(self.node.id, UUID2) - self.assertEqual(UUID2, observed) - - observed = db_api.node_lock_release(self.node.id, UUID2) - self.assertTrue(observed) - - def test_node_lock_steal(self): - observed = db_api.node_lock_steal(self.node.id, UUID1) - 
self.assertEqual(UUID1, observed) - - observed = db_api.node_lock_acquire(self.node.id, UUID2) - self.assertEqual(UUID1, observed) - - observed = db_api.node_lock_release(self.node.id, UUID2) - self.assertFalse(observed) - - observed = db_api.node_lock_release(self.node.id, UUID1) - self.assertTrue(observed) - - observed = db_api.node_lock_acquire(self.node.id, UUID1) - self.assertEqual(UUID1, observed) - - observed = db_api.node_lock_steal(self.node.id, UUID2) - self.assertEqual(UUID2, observed) - - observed = db_api.node_lock_release(self.node.id, UUID1) - self.assertFalse(observed) - - observed = db_api.node_lock_release(self.node.id, UUID2) - self.assertTrue(observed) - - def test_node_is_locked(self): - # newly created node should not be locked - observed = db_api.node_is_locked(self.node.id) - self.assertFalse(observed) - - # lock node - observed = db_api.node_lock_acquire(self.node.id, UUID1) - self.assertIn(UUID1, observed) - - # node should be locked - observed = db_api.node_is_locked(self.node.id) - self.assertTrue(observed) - - # release node lock - observed = db_api.node_lock_release(self.node.id, UUID1) - self.assertTrue(observed) - - # node should not be locked anymore - observed = db_api.node_is_locked(self.node.id) - self.assertFalse(observed) - - -class GCByEngineTest(base.SenlinTestCase): - - def setUp(self): - super(GCByEngineTest, self).setUp() - self.ctx = utils.dummy_context() - self.profile = shared.create_profile(self.ctx) - self.cluster = shared.create_cluster(self.ctx, self.profile) - self.node = shared.create_node(self.ctx, self.cluster, self.profile) - - def test_delete_cluster_lock(self): - # Test the case that a single cluster-scope clock can be released - # - # (dead-engine) --> Action --> ClusterLock - # |action|owner| |cluster|action|scope| - # | A1 | E1 | |C1 |[A1] |-1 | - - # preparation - engine_id = UUID1 - action = shared.create_action(self.ctx, target=self.cluster.id, - status='RUNNING', owner=engine_id, - project=self.ctx.project_id) - db_api.cluster_lock_acquire(self.cluster.id, action.id, -1) - - # do it - db_api.gc_by_engine(engine_id) - - # assertion - observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, -1) - self.assertIn(UUID2, observed) - self.assertNotIn(action.id, observed) - - new_action = db_api.action_get(self.ctx, action.id) - self.assertEqual('FAILED', new_action.status) - self.assertEqual("Engine failure", new_action.status_reason) - - def test_delete_cluster_lock_and_node_lock_1(self): - # Test the case that an action is about node that also locked a - # cluster and the cluster lock can be released - # - # (dead-engine) --> Action --> NodeLock - # |action|owner| |node |action| - # | A1 | E1 | |N1 |A1 | - # --> ClusterLock - # |cluster|action|scope| - # |C1 |[A1] |1 | - # preparation - engine_id = UUID1 - action = shared.create_action(self.ctx, target=self.node.id, - status='RUNNING', owner=engine_id, - project=self.ctx.project_id) - db_api.cluster_lock_acquire(self.cluster.id, action.id, 1) - db_api.node_lock_acquire(self.cluster.id, action.id) - - # do it - db_api.gc_by_engine(engine_id) - - # assertion - # even a read lock is okay now - observed = db_api.cluster_lock_acquire(self.node.id, UUID2, 1) - self.assertIn(UUID2, observed) - self.assertNotIn(action.id, observed) - - # node can be locked again - observed = db_api.node_lock_acquire(self.node.id, UUID2) - self.assertEqual(UUID2, observed) - - new_action = db_api.action_get(self.ctx, action.id) - self.assertEqual('FAILED', new_action.status) - self.assertEqual("Engine 
-
-    def test_delete_cluster_lock_and_node_lock_1(self):
-        # Test the case that an action on a node also locked the
-        # cluster, and the cluster lock can be released
-        #
-        # (dead-engine) --> Action --> NodeLock
-        #                   |action|owner|     |node |action|
-        #                   | A1   | E1  |     |N1   |A1    |
-        #                          --> ClusterLock
-        #                              |cluster|action|scope|
-        #                              |C1     |[A1]  |1    |
-        # preparation
-        engine_id = UUID1
-        action = shared.create_action(self.ctx, target=self.node.id,
-                                      status='RUNNING', owner=engine_id,
-                                      project=self.ctx.project_id)
-        db_api.cluster_lock_acquire(self.cluster.id, action.id, 1)
-        db_api.node_lock_acquire(self.node.id, action.id)
-
-        # do it
-        db_api.gc_by_engine(engine_id)
-
-        # assertion
-        # even a read lock is okay now
-        observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, 1)
-        self.assertIn(UUID2, observed)
-        self.assertNotIn(action.id, observed)
-
-        # node can be locked again
-        observed = db_api.node_lock_acquire(self.node.id, UUID2)
-        self.assertEqual(UUID2, observed)
-
-        new_action = db_api.action_get(self.ctx, action.id)
-        self.assertEqual('FAILED', new_action.status)
-        self.assertEqual("Engine failure", new_action.status_reason)
-
-    def test_delete_cluster_lock_and_node_lock_2(self):
-        # Test the case that an action on a node also locked the
-        # cluster, and the cluster lock will remain held
-        #
-        # (dead-engine) --> Action --> NodeLock
-        #                   |action|owner|     |node |action|
-        #                   | A1   | E1  |     |N1   |A1    |
-        #                          --> ClusterLock
-        #                              |cluster|action  |scope|
-        #                              |C1     |[A1, A2]|2    |
-        # preparation
-        engine_id = UUID1
-        action = shared.create_action(self.ctx, target=self.node.id,
-                                      status='RUNNING', owner=engine_id,
-                                      project=self.ctx.project_id)
-        db_api.cluster_lock_acquire(self.cluster.id, action.id, 1)
-        db_api.cluster_lock_acquire(self.cluster.id, UUID2, 1)
-        db_api.node_lock_acquire(self.node.id, action.id)
-
-        # do it
-        db_api.gc_by_engine(engine_id)
-
-        # assertion
-        # a read lock is okay now and cluster lock state not broken
-        observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, 1)
-        self.assertIn(UUID2, observed)
-        self.assertIn(UUID3, observed)
-        self.assertNotIn(action.id, observed)
-
-        # node can be locked again
-        observed = db_api.node_lock_acquire(self.node.id, UUID2)
-        self.assertEqual(UUID2, observed)
-
-        new_action = db_api.action_get(self.ctx, action.id)
-        self.assertEqual('FAILED', new_action.status)
-        self.assertEqual("Engine failure", new_action.status_reason)
-
-
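Read together, these cases pin down what gc_by_engine is expected to do for a dead engine: break the cluster and node locks held by that engine's actions, then mark those actions FAILED with the reason "Engine failure" and clear their owner. A hedged sketch of that control flow over toy in-memory tables; the real function operates on database rows.

# Sketch of the garbage-collection contract the tests above pin down,
# over toy in-memory tables; the real gc_by_engine works on DB rows.
actions = {'A1': {'owner': 'E1', 'status': 'RUNNING', 'reason': ''}}
cluster_locks = {'C1': ['A1']}   # cluster_id -> list of holder action ids
node_locks = {'N1': 'A1'}        # node_id -> holder action id


def gc_by_engine(engine_id):
    dead = [aid for aid, a in actions.items() if a['owner'] == engine_id]
    for action_id in dead:
        # Break cluster locks held by the dead engine's actions ...
        for holders in cluster_locks.values():
            if action_id in holders:
                holders.remove(action_id)
        # ... and node locks ...
        for node_id, holder in list(node_locks.items()):
            if holder == action_id:
                del node_locks[node_id]
        # ... then fail the action and clear its owner, mirroring the
        # assertions in the tests above.
        actions[action_id].update(status='FAILED', owner=None,
                                  reason='Engine failure')


gc_by_engine('E1')
assert actions['A1'] == {'owner': None, 'status': 'FAILED',
                         'reason': 'Engine failure'}
assert cluster_locks == {'C1': []} and node_locks == {}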
-class DummyGCByEngineTest(base.SenlinTestCase):
-
-    def setUp(self):
-        super(DummyGCByEngineTest, self).setUp()
-        self.ctx = utils.dummy_context()
-        self.profile = shared.create_profile(self.ctx)
-        self.cluster = shared.create_cluster(self.ctx, self.profile)
-        self.node = shared.create_node(self.ctx, self.cluster, self.profile)
-
-    def test_delete_cluster_lock(self):
-        # Test the case that a single cluster-scope lock can be released
-        #
-        # (dead-engine) --> Action --> ClusterLock
-        #                   |action|owner|     |cluster|action|scope|
-        #                   | A1   | E1  |     |C1     |[A1]  |-1   |
-
-        # preparation
-        engine_id = UUID1
-        action = shared.create_action(self.ctx, target=self.cluster.id,
-                                      status='RUNNING', owner=engine_id,
-                                      project=self.ctx.project_id)
-        db_api.cluster_lock_acquire(self.cluster.id, action.id, -1)
-
-        # do it
-        db_api.dummy_gc(engine_id)
-
-        # assertion
-        observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, -1)
-        self.assertIn(UUID2, observed)
-        self.assertNotIn(action.id, observed)
-
-        new_action = db_api.action_get(self.ctx, action.id)
-        self.assertEqual('FAILED', new_action.status)
-        self.assertEqual("Engine failure", new_action.status_reason)
-
-    def test_delete_cluster_lock_and_node_lock_1(self):
-        # Test the case that an action on a node also locked the
-        # cluster, and the cluster lock can be released
-        #
-        # (dead-engine) --> Action --> NodeLock
-        #                   |action|owner|     |node |action|
-        #                   | A1   | E1  |     |N1   |A1    |
-        #                          --> ClusterLock
-        #                              |cluster|action|scope|
-        #                              |C1     |[A1]  |1    |
-        # preparation
-        engine_id = UUID1
-        action = shared.create_action(self.ctx, target=self.node.id,
-                                      status='RUNNING', owner=engine_id,
-                                      project=self.ctx.project_id)
-        db_api.cluster_lock_acquire(self.cluster.id, action.id, 1)
-        db_api.node_lock_acquire(self.node.id, action.id)
-
-        # do it
-        db_api.dummy_gc(engine_id)
-
-        # assertion
-        # even a read lock is okay now
-        observed = db_api.cluster_lock_acquire(self.cluster.id, UUID2, 1)
-        self.assertIn(UUID2, observed)
-        self.assertNotIn(action.id, observed)
-
-        # node can be locked again
-        observed = db_api.node_lock_acquire(self.node.id, UUID2)
-        self.assertEqual(UUID2, observed)
-
-        new_action = db_api.action_get(self.ctx, action.id)
-        self.assertEqual('FAILED', new_action.status)
-        self.assertEqual("Engine failure", new_action.status_reason)
-
-    def test_delete_cluster_lock_and_node_lock_2(self):
-        # Test the case that an action on a node also locked the
-        # cluster, and the cluster lock will remain held
-        #
-        # (dead-engine) --> Action --> NodeLock
-        #                   |action|owner|     |node |action|
-        #                   | A1   | E1  |     |N1   |A1    |
-        #                          --> ClusterLock
-        #                              |cluster|action  |scope|
-        #                              |C1     |[A1, A2]|2    |
-        # preparation
-        engine_id = UUID1
-        action = shared.create_action(self.ctx, target=self.node.id,
-                                      status='RUNNING', owner=engine_id,
-                                      project=self.ctx.project_id)
-        db_api.cluster_lock_acquire(self.cluster.id, action.id, 1)
-        db_api.cluster_lock_acquire(self.cluster.id, UUID2, 1)
-        db_api.node_lock_acquire(self.node.id, action.id)
-
-        # do it
-        db_api.dummy_gc(engine_id)
-
-        # assertion
-        # a read lock is okay now and cluster lock state not broken
-        observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, 1)
-        self.assertIn(UUID2, observed)
-        self.assertIn(UUID3, observed)
-        self.assertNotIn(action.id, observed)
-
-        # node can be locked again
-        observed = db_api.node_lock_acquire(self.node.id, UUID2)
-        self.assertEqual(UUID2, observed)
-
-        new_action = db_api.action_get(self.ctx, action.id)
-        self.assertEqual('FAILED', new_action.status)
-        self.assertEqual("Engine failure", new_action.status_reason)
-
-    def test_mult_engine_keep_node_scope_lock(self):
-        engine1 = UUID1
-        engine2 = UUID2
-
-        node2 = shared.create_node(self.ctx, self.cluster, self.profile)
-
-        c_action = shared.create_action(self.ctx, target=self.cluster.id,
-                                        status='WAITING', owner=engine1,
-                                        project=self.ctx.project_id)
-
-        n_action_1 = shared.create_action(self.ctx, target=self.node.id,
-                                          status='RUNNING', owner=engine1,
-                                          project=self.ctx.project_id)
-
-        n_action_2 = shared.create_action(self.ctx, target=node2.id,
-                                          status='RUNNING', owner=engine2,
-                                          project=self.ctx.project_id)
-
-        db_api.dependency_add(self.ctx, [n_action_1.id, n_action_2.id],
-                              c_action.id)
-
-        db_api.cluster_lock_acquire(self.cluster.id, c_action.id, -1)
-        db_api.cluster_lock_acquire(self.cluster.id, n_action_1.id, 1)
-        db_api.cluster_lock_acquire(self.cluster.id, n_action_2.id, 1)
-        db_api.node_lock_acquire(self.node.id, n_action_1.id)
-        db_api.node_lock_acquire(node2.id, n_action_2.id)
-
-        # do it
-        db_api.dummy_gc(engine1)
-
-        # try to acquire cluster scope lock
-        observed = db_api.cluster_lock_acquire(self.cluster.id, UUID3, -1)
-        self.assertIn(UUID3, observed)
-        self.assertEqual(1, len(observed))
-
-        # try to acquire node scope lock
-        UUID4 = uuidutils.generate_uuid()
-        observed = db_api.cluster_lock_acquire(self.node.id, UUID4, 1)
-        self.assertIn(UUID4, observed)
-        self.assertEqual(1, len(observed))
-
-        # node scope lock will be also released
-        UUID5 = uuidutils.generate_uuid()
-        observed = db_api.cluster_lock_acquire(node2.id, UUID5, 1)
-        self.assertIn(UUID5, observed)
-        self.assertEqual(1, len(observed))
-
-        # try to acquire node lock
-        UUID6 = uuidutils.generate_uuid()
-        observed = db_api.node_lock_acquire(self.node.id, UUID6)
-        self.assertEqual(UUID6, observed)
-
-        # node locks for actions owned by other engines are still there
-        UUID7 = uuidutils.generate_uuid()
-        observed = db_api.node_lock_acquire(node2.id, UUID7)
-        self.assertNotEqual(UUID7, observed)
-        self.assertEqual(n_action_2.id, observed)
-
-        # check dependency
-        dependents = db_api.dependency_get_depended(self.ctx, c_action.id)
-        self.assertEqual(0, len(dependents))
-
-        # check action status
-        new_c_action = db_api.action_get(self.ctx, c_action.id)
-        self.assertEqual('FAILED', new_c_action.status)
-        self.assertIsNone(new_c_action.owner)
-
-        new_n_action_1 = db_api.action_get(self.ctx, n_action_1.id)
-        self.assertEqual('FAILED', new_n_action_1.status)
-        self.assertIsNone(new_n_action_1.owner)
-
-        new_n_action_2 = db_api.action_get(self.ctx, n_action_2.id)
-        self.assertEqual('FAILED', new_n_action_2.status)
-        self.assertIsNone(new_n_action_2.owner)
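Both classes lean on the same cluster-lock convention: scope -1 requests an exclusive cluster-wide hold, positive scopes are shared node-scope holds that can coexist, and cluster_lock_acquire returns the holder list after the attempt, which is why the tests check membership with assertIn. A minimal sketch of that convention, under those assumptions; it is not Senlin's implementation.

# Toy model of the cluster lock scoping convention assumed by the tests:
# scope == -1 asks for an exclusive hold, scope >= 1 for a shared hold.
# acquire() returns the holder list after the attempt, so a caller that
# finds only other ids in the result knows the acquisition failed.
cluster_locks = {}   # cluster_id -> {'exclusive': bool, 'holders': [ids]}


def cluster_lock_acquire(cluster_id, action_id, scope):
    lock = cluster_locks.setdefault(
        cluster_id, {'exclusive': False, 'holders': []})
    if not lock['holders']:                      # free: take it
        lock['exclusive'] = (scope == -1)
        lock['holders'].append(action_id)
    elif scope >= 1 and not lock['exclusive']:   # shared holds may pile up
        lock['holders'].append(action_id)
    return list(lock['holders'])


assert cluster_lock_acquire('C1', 'A1', 1) == ['A1']
assert cluster_lock_acquire('C1', 'A2', 1) == ['A1', 'A2']   # shared
assert cluster_lock_acquire('C1', 'A3', -1) == ['A1', 'A2']  # refused
assert cluster_lock_acquire('C2', 'A4', -1) == ['A4']        # exclusive
assert cluster_lock_acquire('C2', 'A5', 1) == ['A4']         # refused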
diff --git a/senlin/tests/unit/db/test_migration.py b/senlin/tests/unit/db/test_migration.py
deleted file mode 100644
index feb90632c..000000000
--- a/senlin/tests/unit/db/test_migration.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-# Based on Nova's test_migrations.py
-
-
-import os
-
-from alembic import command as alembic_api
-from alembic import config as alembic_config
-from alembic import script as alembic_script
-from oslo_db.sqlalchemy import enginefacade
-from oslo_db.sqlalchemy import test_fixtures
-from oslo_log import log as logging
-
-from senlin.db import sqlalchemy
-from senlin.tests.unit.common import base
-
-
-LOG = logging.getLogger(__name__)
-ALEMBIC_PATH = os.path.join(
-    os.path.dirname(sqlalchemy.__file__), 'alembic.ini'
-)
-
-
-class SenlinMigrationsWalk(
-    test_fixtures.OpportunisticDBTestMixin, base.SenlinTestCase,
-):
-    # Migrations can take a long time, particularly on underpowered CI
-    # nodes. Give them some breathing room.
-    TIMEOUT_SCALING_FACTOR = 4
-
-    def setUp(self):
-        super().setUp()
-        self.engine = enginefacade.writer.get_engine()
-        self.config = alembic_config.Config(ALEMBIC_PATH)
-        self.init_version = '569eb0b8'
-
-    def _migrate_up(self, connection, revision):
-        if revision == self.init_version:  # no tests for the initial revision
-            alembic_api.upgrade(self.config, revision)
-            return
-
-        self.assertIsNotNone(
-            getattr(self, '_check_%s' % revision, None),
-            (
-                'DB Migration %s does not have a test; you must add one'
-            ) % revision,
-        )
-
-        pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None)
-        if pre_upgrade:
-            pre_upgrade(connection)
-
-        alembic_api.upgrade(self.config, revision)
-
-        post_upgrade = getattr(self, '_check_%s' % revision, None)
-        if post_upgrade:
-            post_upgrade(connection)
-
-    def _check_6f73af60(self, connection):
-        pass
-
-    def _check_c3e2bfa76dea(self, connection):
-        pass
-
-    def _check_ab7b23c67360(self, connection):
-        pass
-
-    def _check_662f8e74ac6f(self, connection):
-        pass
-
-    def _check_9dbb563afc4d(self, connection):
-        pass
-
-    def _check_0c04e812f224(self, connection):
-        pass
-
-    def _check_5b7cb185e0a5(self, connection):
-        pass
-
-    def _check_3a04debb8cb1(self, connection):
-        pass
-
-    def _check_beffe13cf8e5(self, connection):
-        pass
-
-    def _check_aaa7e7755feb(self, connection):
-        pass
-
-    def _check_004f8202c264(self, connection):
-        pass
-
-    def test_single_base_revision(self):
-        script = alembic_script.ScriptDirectory.from_config(self.config)
-        self.assertEqual(1, len(script.get_bases()))
-
-    def test_walk_versions(self):
-        with self.engine.begin() as connection:
-            self.config.attributes['connection'] = connection
-            script = alembic_script.ScriptDirectory.from_config(self.config)
-            revisions = [x.revision for x in script.walk_revisions()]
-
-            # for some reason, 'walk_revisions' gives us the revisions in
-            # reverse chronological order, so we have to invert this
-            revisions.reverse()
-            self.assertEqual(revisions[0], self.init_version)
-
-            for revision in revisions:
-                LOG.info('Testing revision %s', revision)
-                self._migrate_up(connection, revision)
-
-
-class TestMigrationsWalkSQLite(
-    SenlinMigrationsWalk,
-    test_fixtures.OpportunisticDBTestMixin,
-):
-    pass
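_migrate_up above dispatches to per-revision hooks purely by naming convention: an optional _pre_upgrade_<revision> prepares fixture data, the upgrade runs, and a mandatory _check_<revision> validates the result. The same getattr-based dispatch in isolation, as a generic sketch not tied to alembic; the class and revision names here are invented for illustration.

# Generic sketch of the name-based hook dispatch used by _migrate_up:
# optional pre-hooks and mandatory post-hooks are looked up via getattr.
class WalkExample:
    def _pre_upgrade_abc123(self):
        print('preparing fixture data for abc123')

    def _check_abc123(self):
        print('validating schema after abc123')

    def apply(self, revision):
        pre = getattr(self, '_pre_upgrade_%s' % revision, None)
        if pre:
            pre()
        # ... the real test runs alembic_api.upgrade(config, revision) here
        check = getattr(self, '_check_%s' % revision, None)
        assert check is not None, (
            'DB Migration %s does not have a test; you must add one'
            % revision)
        check()


WalkExample().apply('abc123')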
diff --git a/senlin/tests/unit/db/test_node_api.py b/senlin/tests/unit/db/test_node_api.py
deleted file mode 100644
index 098f97c33..000000000
--- a/senlin/tests/unit/db/test_node_api.py
+++ /dev/null
@@ -1,681 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
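The node API tests below repeatedly probe one visibility rule: with project_safe=True a request sees only rows from its own project unless the context is an admin, while project_safe=False disables the filter entirely. A hedged sketch of that rule, illustrative only; Senlin applies it as a filter in the SQLAlchemy query layer.

# Sketch of the project_safe visibility rule the following tests assert.
# Real Senlin applies this as a filter on the SQLAlchemy query.
def visible(row_project, ctx_project, is_admin, project_safe=True):
    if not project_safe:        # explicit opt-out sees everything
        return True
    if is_admin:                # admins see all projects even when safe
        return True
    return row_project == ctx_project


assert visible('p1', 'p1', is_admin=False)
assert not visible('p1', 'p2', is_admin=False)
assert visible('p1', 'p2', is_admin=True)
assert visible('p1', 'p2', is_admin=False, project_safe=False)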
- -from unittest import mock - -from oslo_db.sqlalchemy import utils as sa_utils -from oslo_serialization import jsonutils -from oslo_utils import timeutils as tu - -from senlin.common import consts -from senlin.common import exception -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - -UUID1 = shared.UUID1 -UUID2 = shared.UUID2 -UUID3 = shared.UUID3 - - -class DBAPINodeTest(base.SenlinTestCase): - def setUp(self): - super(DBAPINodeTest, self).setUp() - self.ctx = utils.dummy_context() - self.profile = shared.create_profile(self.ctx) - self.cluster = shared.create_cluster(self.ctx, self.profile) - - def test_node_create(self): - res = shared.create_node(self.ctx, self.cluster, self.profile) - - node = db_api.node_get(self.ctx, res.id) - - self.assertIsNotNone(node) - self.assertEqual('test_node_name', node.name) - self.assertEqual(UUID1, node.physical_id) - self.assertEqual(1, node.index) - self.assertIsNone(node.role) - self.assertIsNone(node.created_at) - self.assertIsNone(node.updated_at) - self.assertEqual('ACTIVE', node.status) - self.assertEqual('create complete', node.status_reason) - self.assertEqual('{"foo": "123"}', jsonutils.dumps(node.meta_data)) - self.assertEqual('{"key1": "value1"}', jsonutils.dumps(node.data)) - self.assertEqual(self.cluster.id, node.cluster_id) - self.assertEqual(self.profile.id, node.profile_id) - - def test_node_get(self): - res = shared.create_node(self.ctx, self.cluster, self.profile) - - node = db_api.node_get(self.ctx, res.id) - self.assertIsNotNone(node) - - node = db_api.node_get(self.ctx, UUID2) - self.assertIsNone(node) - - def test_node_get_diff_project(self): - res = shared.create_node(self.ctx, self.cluster, self.profile) - node = db_api.node_get(self.ctx, res.id) - self.assertIsNotNone(node) - - ctx_new = utils.dummy_context(project='a_different_project') - node = db_api.node_get(ctx_new, res.id) - self.assertIsNone(node) - node = db_api.node_get(ctx_new, res.id, project_safe=False) - self.assertIsNotNone(node) - - def test_node_get_with_admin_context(self): - res = shared.create_node(self.ctx, self.cluster, self.profile) - admin_ctx = utils.dummy_context(project='a_different_project', - is_admin=True) - node = db_api.node_get(admin_ctx, res.id, project_safe=True) - self.assertIsNotNone(node) - - node = db_api.node_get(admin_ctx, res.id, project_safe=False) - self.assertIsNotNone(node) - - def test_node_get_by_name(self): - shared.create_node(self.ctx, self.cluster, self.profile) - node = db_api.node_get_by_name(self.ctx, 'test_node_name') - self.assertIsNotNone(node) - self.assertEqual('test_node_name', node.name) - self.assertEqual(self.cluster.id, node.cluster_id) - - res = db_api.node_get_by_name(self.ctx, 'BogusName') - self.assertIsNone(res) - - # duplicated name - shared.create_node(self.ctx, self.cluster, self.profile) - self.assertRaises(exception.MultipleChoices, - db_api.node_get_by_name, - self.ctx, 'test_node_name') - - def test_node_get_by_name_diff_project(self): - shared.create_node(self.ctx, self.cluster, self.profile) - res = db_api.node_get_by_name(self.ctx, 'test_node_name') - self.assertIsNotNone(res) - - ctx_new = utils.dummy_context(project='a_different_project') - res = db_api.node_get_by_name(ctx_new, 'test_node_name') - self.assertIsNone(res) - res = db_api.node_get_by_name(ctx_new, 'test_node_name', - project_safe=False) - self.assertIsNotNone(res) - - def test_node_get_by_short_id(self): - 
node_id1 = 'same-part-unique-part' - node_id2 = 'same-part-part-unique' - shared.create_node(self.ctx, None, self.profile, - id=node_id1, name='node-1') - shared.create_node(self.ctx, None, self.profile, - id=node_id2, name='node-2') - - for x in range(len('same-part-')): - self.assertRaises(exception.MultipleChoices, - db_api.node_get_by_short_id, - self.ctx, node_id1[:x]) - - res = db_api.node_get_by_short_id(self.ctx, node_id1[:11]) - self.assertEqual(node_id1, res.id) - res = db_api.node_get_by_short_id(self.ctx, node_id2[:11]) - self.assertEqual(node_id2, res.id) - res = db_api.node_get_by_short_id(self.ctx, 'non-existent') - self.assertIsNone(res) - - def test_node_get_by_short_id_diff_project(self): - node_id = 'same-part-unique-part' - shared.create_node(self.ctx, None, self.profile, - id=node_id, name='node-1') - res = db_api.node_get_by_short_id(self.ctx, node_id[:11]) - self.assertIsNotNone(res) - - ctx_new = utils.dummy_context(project='a_different_project') - res = db_api.node_get_by_short_id(ctx_new, node_id[:11]) - self.assertIsNone(res) - res = db_api.node_get_by_short_id(ctx_new, node_id[:11], - project_safe=False) - self.assertIsNotNone(res) - - def test_node_get_by_short_id_admin_context(self): - node_id = 'same-part-unique-part' - shared.create_node(self.ctx, None, self.profile, - id=node_id, name='node-1') - - admin_ctx = utils.dummy_context(project='a_different_project', - is_admin=True) - res = db_api.node_get_by_short_id(admin_ctx, node_id[:11], - project_safe=True) - self.assertIsNotNone(res) - - res = db_api.node_get_by_short_id(admin_ctx, node_id[:11], - project_safe=False) - self.assertIsNotNone(res) - - def test_node_get_all(self): - values = [{'name': 'node1'}, {'name': 'node2'}, {'name': 'node3'}] - [shared.create_node(self.ctx, None, self.profile, **v) for v in values] - - nodes = db_api.node_get_all(self.ctx) - self.assertEqual(3, len(nodes)) - - names = [node.name for node in nodes] - [self.assertIn(val['name'], names) for val in values] - - def test_node_add_node_dependents(self): - node_id = 'host_node' - node = shared.create_node(self.ctx, None, self.profile, - id=node_id, name='node-1') - db_api.node_add_dependents(self.ctx, node_id, 'NODE1') - node = db_api.node_get(self.ctx, node_id) - nodes = node.dependents['nodes'] - self.assertEqual(['NODE1'], nodes) - - db_api.node_add_dependents(self.ctx, node_id, 'NODE2') - new_node = db_api.node_get(self.ctx, node_id) - nodes = new_node.dependents['nodes'] - self.assertEqual(['NODE1', 'NODE2'], nodes) - - def test_node_add_profile_dependents(self): - node_id = 'host_node' - new_profile = shared.create_profile(self.ctx) - node = shared.create_node(self.ctx, None, self.profile, - id=node_id, name='node-1') - db_api.node_add_dependents(self.ctx, node_id, new_profile.id, - 'profile') - node = db_api.node_get(self.ctx, node_id) - nodes = node.dependents['profiles'] - self.assertEqual([new_profile.id], nodes) - - new_profile_1 = shared.create_profile(self.ctx) - db_api.node_add_dependents(self.ctx, node_id, new_profile_1.id, - 'profile') - new_node = db_api.node_get(self.ctx, node_id) - nodes = new_node.dependents['profiles'] - self.assertEqual([new_profile.id, new_profile_1.id], nodes) - - def test_node_remove_node_dependents(self): - node_id = 'host_node' - dependents = {'nodes': ['NODE1', 'NODE2']} - node = shared.create_node(self.ctx, None, self.profile, - id=node_id, dependents=dependents) - db_api.node_remove_dependents(self.ctx, node_id, 'NODE1') - node = db_api.node_get(self.ctx, node_id) - dependents = 
node.dependents - self.assertEqual({'nodes': ['NODE2']}, dependents) - - db_api.node_remove_dependents(self.ctx, node_id, 'NODE2', 'node') - node = db_api.node_get(self.ctx, node_id) - dependents = node.dependents - self.assertEqual({}, dependents) - - def test_node_remove_profile_dependents(self): - node_id = 'host_node' - dependents = {'profiles': ['P1', 'P2']} - node = shared.create_node(self.ctx, None, self.profile, - id=node_id, dependents=dependents) - db_api.node_remove_dependents(self.ctx, node_id, 'P1', 'profile') - node = db_api.node_get(self.ctx, node_id) - dependents = node.dependents - self.assertEqual({'profiles': ['P2']}, dependents) - - db_api.node_remove_dependents(self.ctx, node_id, 'P2', 'profile') - node = db_api.node_get(self.ctx, node_id) - dependents = node.dependents - self.assertEqual({}, dependents) - - def test_node_get_all_with_cluster_id(self): - values = [{'name': 'node1'}, {'name': 'node2'}, {'name': 'node3'}] - for v in values: - shared.create_node(self.ctx, self.cluster, self.profile, **v) - shared.create_node(self.ctx, None, self.profile, name='node0') - - nodes = db_api.node_get_all(self.ctx, cluster_id=self.cluster.id) - self.assertEqual(3, len(nodes)) - - names = [node.name for node in nodes] - [self.assertIn(val['name'], names) for val in values] - - def test_node_get_all_with_limit_marker(self): - node_ids = ['node1', 'node2', 'node3'] - for v in node_ids: - shared.create_node(self.ctx, self.cluster, self.profile, - id=v, init_at=tu.utcnow(True)) - - nodes = db_api.node_get_all(self.ctx, limit=1) - self.assertEqual(1, len(nodes)) - - nodes = db_api.node_get_all(self.ctx, limit=2) - self.assertEqual(2, len(nodes)) - - nodes = db_api.node_get_all(self.ctx, limit=5) - self.assertEqual(3, len(nodes)) - - nodes = db_api.node_get_all(self.ctx, marker='node1') - self.assertEqual(2, len(nodes)) - - nodes = db_api.node_get_all(self.ctx, marker='node2') - self.assertEqual(1, len(nodes)) - - nodes = db_api.node_get_all(self.ctx, marker='node3') - self.assertEqual(0, len(nodes)) - - nodes = db_api.node_get_all(self.ctx, limit=1, marker='node1') - self.assertEqual(1, len(nodes)) - - @mock.patch.object(sa_utils, 'paginate_query') - def test_node_get_all_used_sort_keys(self, mock_paginate): - node_ids = ['node1', 'node2', 'node3'] - for v in node_ids: - shared.create_node(self.ctx, self.cluster, self.profile, id=v) - - sort = ','.join(consts.NODE_SORT_KEYS) - - db_api.node_get_all(self.ctx, sort=sort) - args = mock_paginate.call_args[0] - used_sort_keys = set(args[3]) - sort_keys = consts.NODE_SORT_KEYS - sort_keys.append('id') - expected_keys = set(sort_keys) - self.assertEqual(expected_keys, used_sort_keys) - - def test_node_get_all_sorting(self): - values = [{'id': '001', 'name': 'node1', 'status': 'ACTIVE'}, - {'id': '002', 'name': 'node3', 'status': 'ERROR'}, - {'id': '003', 'name': 'node2', 'status': 'UPDATING'}] - for v in values: - shared.create_node(self.ctx, self.cluster, self.profile, **v) - - nodes = db_api.node_get_all(self.ctx, sort='name,status') - self.assertEqual(3, len(nodes)) - # Sorted by name - self.assertEqual('001', nodes[0].id) - self.assertEqual('003', nodes[1].id) - self.assertEqual('002', nodes[2].id) - - nodes = db_api.node_get_all(self.ctx, sort='status,name') - self.assertEqual(3, len(nodes)) - # Sorted by statuses (ascending) - self.assertEqual('001', nodes[0].id) - self.assertEqual('002', nodes[1].id) - self.assertEqual('003', nodes[2].id) - - nodes = db_api.node_get_all(self.ctx, sort='status:desc,name:desc') - self.assertEqual(3, 
len(nodes)) - # Sorted by statuses (descending) - self.assertEqual('003', nodes[0].id) - self.assertEqual('002', nodes[1].id) - self.assertEqual('001', nodes[2].id) - - def test_node_get_all_default_sorting(self): - nodes = [shared.create_node(self.ctx, None, self.profile, - init_at=tu.utcnow(True)) - for x in range(3)] - - results = db_api.node_get_all(self.ctx) - self.assertEqual(3, len(results)) - self.assertEqual(nodes[0].id, results[0].id) - self.assertEqual(nodes[1].id, results[1].id) - self.assertEqual(nodes[2].id, results[2].id) - - def test_node_get_all_with_filters(self): - shared.create_node(self.ctx, None, self.profile, name='node1') - shared.create_node(self.ctx, None, self.profile, name='node2') - - filters = {'name': ['node1', 'nodex']} - results = db_api.node_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('node1', results[0]['name']) - - filters = {'name': 'node1'} - results = db_api.node_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('node1', results[0]['name']) - - def test_node_get_all_with_empty_filters(self): - shared.create_node(self.ctx, None, self.profile, name='node1') - shared.create_node(self.ctx, None, self.profile, name='node2') - - filters = None - results = db_api.node_get_all(self.ctx, filters=filters) - self.assertEqual(2, len(results)) - - def test_node_get_all_with_project_safe(self): - shared.create_node(self.ctx, None, self.profile, name='node1') - shared.create_node(self.ctx, None, self.profile, name='node2') - - self.ctx.project_id = 'a-different-project' - results = db_api.node_get_all(self.ctx, project_safe=False) - self.assertEqual(2, len(results)) - - self.ctx.project_id = 'a-different-project' - results = db_api.node_get_all(self.ctx) - self.assertEqual(0, len(results)) - - results = db_api.node_get_all(self.ctx, project_safe=True) - self.assertEqual(0, len(results)) - - def test_node_get_all_with_admin_context(self): - shared.create_node(self.ctx, None, self.profile, name='node1') - shared.create_node(self.ctx, None, self.profile, name='node2') - - admin_ctx = utils.dummy_context(project='a_different_project', - is_admin=True) - results = db_api.node_get_all(admin_ctx, project_safe=True) - self.assertEqual(2, len(results)) - - results = db_api.node_get_all(admin_ctx, project_safe=False) - self.assertEqual(2, len(results)) - - def test_get_all_by_cluster(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - - node0 = shared.create_node(self.ctx, None, self.profile) - node1 = shared.create_node(self.ctx, self.cluster, self.profile) - node2 = shared.create_node(self.ctx, self.cluster, self.profile) - node3 = shared.create_node(self.ctx, cluster1, self.profile) - - nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id) - self.assertEqual(2, len(nodes)) - self.assertEqual(set([node1.id, node2.id]), - set([nodes[0].id, nodes[1].id])) - - # retrieve orphan nodes - nodes = db_api.node_get_all_by_cluster(self.ctx, '') - self.assertEqual(1, len(nodes)) - self.assertEqual(node0.id, nodes[0].id) - - # retrieve all nodes - nodes = db_api.node_get_all_by_cluster(self.ctx, None) - self.assertEqual(4, len(nodes)) - self.assertEqual(node0.id, nodes[0].id) - - nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(1, len(nodes)) - self.assertEqual(node3.id, nodes[0].id) - - def test_get_all_by_cluster_with_filters(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - - shared.create_node(self.ctx, None, 
self.profile, role="slave") - node1 = shared.create_node(self.ctx, self.cluster, self.profile, - role="slave") - shared.create_node(self.ctx, self.cluster, self.profile, role="master") - shared.create_node(self.ctx, cluster1, self.profile, role="unknown") - - nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id, - filters={"role": ["slave"]}) - self.assertEqual(1, len(nodes)) - self.assertEqual(node1.id, nodes[0].id) - - nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id, - filters={"role": "master"}) - self.assertEqual(0, len(nodes)) - - def test_get_all_by_cluster_diff_project(self): - shared.create_cluster(self.ctx, self.profile) - - node1 = shared.create_node(self.ctx, self.cluster, self.profile) - node2 = shared.create_node(self.ctx, self.cluster, self.profile) - - nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id) - self.assertEqual(2, len(nodes)) - self.assertEqual(set([node1.id, node2.id]), - set([nodes[0].id, nodes[1].id])) - - ctx_new = utils.dummy_context(project='a_different_project') - nodes = db_api.node_get_all_by_cluster(ctx_new, self.cluster.id) - self.assertEqual(0, len(nodes)) - nodes = db_api.node_get_all_by_cluster(ctx_new, self.cluster.id, - project_safe=False) - self.assertEqual(2, len(nodes)) - - def test_get_all_by_cluster_admin_context(self): - shared.create_cluster(self.ctx, self.profile) - - node1 = shared.create_node(self.ctx, self.cluster, self.profile) - node2 = shared.create_node(self.ctx, self.cluster, self.profile) - - admin_ctx = utils.dummy_context(project='a_different_project', - is_admin=True) - nodes = db_api.node_get_all_by_cluster(admin_ctx, self.cluster.id) - self.assertEqual(2, len(nodes)) - - nodes = db_api.node_get_all_by_cluster(admin_ctx, self.cluster.id, - project_safe=False) - self.assertEqual(2, len(nodes)) - self.assertEqual(set([node1.id, node2.id]), - set([nodes[0].id, nodes[1].id])) - - def test_node_count_by_cluster(self): - shared.create_cluster(self.ctx, self.profile) - - shared.create_node(self.ctx, self.cluster, self.profile) - shared.create_node(self.ctx, self.cluster, self.profile) - - res = db_api.node_count_by_cluster(self.ctx, self.cluster.id) - self.assertEqual(2, res) - - def test_node_count_by_cluster_with_filters(self): - shared.create_cluster(self.ctx, self.profile) - - shared.create_node(self.ctx, self.cluster, self.profile, - status='ACTIVE') - shared.create_node(self.ctx, self.cluster, self.profile, - status='ERROR') - - res = db_api.node_count_by_cluster(self.ctx, self.cluster.id, - status='ACTIVE') - self.assertEqual(1, res) - res = db_api.node_count_by_cluster(self.ctx, self.cluster.id, - status='ERROR') - self.assertEqual(1, res) - - def test_node_count_by_cluster_diff_project(self): - ctx_new = utils.dummy_context(project='a_different_project') - shared.create_cluster(self.ctx, self.profile) - - shared.create_node(self.ctx, self.cluster, self.profile) - shared.create_node(self.ctx, self.cluster, self.profile) - - res = db_api.node_count_by_cluster(ctx_new, self.cluster.id) - self.assertEqual(0, res) - - res = db_api.node_count_by_cluster(ctx_new, self.cluster.id, - project_safe=False) - self.assertEqual(2, res) - - def test_node_count_by_cluster_admin_context(self): - shared.create_cluster(self.ctx, self.profile) - shared.create_node(self.ctx, self.cluster, self.profile) - shared.create_node(self.ctx, self.cluster, self.profile) - - admin_ctx = utils.dummy_context(project='a_different_project', - is_admin=True) - res = db_api.node_count_by_cluster(admin_ctx, self.cluster.id, - 
project_safe=True) - self.assertEqual(2, res) - - res = db_api.node_count_by_cluster(admin_ctx, self.cluster.id, - project_safe=False) - self.assertEqual(2, res) - - def test_ids_by_cluster(self): - node0 = shared.create_node(self.ctx, None, self.profile) - node1 = shared.create_node(self.ctx, self.cluster, self.profile) - node2 = shared.create_node(self.ctx, self.cluster, self.profile) - - results = db_api.node_ids_by_cluster(self.ctx, self.cluster.id) - self.assertEqual(2, len(results)) - self.assertEqual(set([node1.id, node2.id]), set(results)) - - # retrieve orphan nodes - results = db_api.node_ids_by_cluster(self.ctx, '') - self.assertEqual(1, len(results)) - self.assertEqual(node0.id, results[0]) - - def test_ids_by_cluster_with_filters(self): - node0 = shared.create_node(self.ctx, None, self.profile, - role='slave') - node1 = shared.create_node(self.ctx, self.cluster, self.profile, - role='master') - shared.create_node(self.ctx, self.cluster, self.profile) - - results = db_api.node_ids_by_cluster(self.ctx, self.cluster.id, - filters={'role': 'master'}) - self.assertEqual(1, len(results)) - self.assertEqual(node1.id, results[0]) - - # retrieve orphan nodes - results = db_api.node_ids_by_cluster(self.ctx, '') - self.assertEqual(1, len(results)) - self.assertEqual(node0.id, results[0]) - - def test_node_update(self): - node = shared.create_node(self.ctx, self.cluster, self.profile) - new_attributes = { - 'name': 'new node name', - 'status': 'bad status', - 'role': 'a new role', - } - db_api.node_update(self.ctx, node.id, new_attributes) - - node = db_api.node_get(self.ctx, node.id) - self.assertEqual('new node name', node.name) - self.assertEqual('bad status', node.status) - self.assertEqual('a new role', node.role) - - def test_node_update_not_found(self): - new_attributes = {'name': 'new_name'} - ex = self.assertRaises(exception.ResourceNotFound, - db_api.node_update, - self.ctx, 'BogusId', new_attributes) - self.assertEqual("The node 'BogusId' could not be found.", - str(ex)) - - def test_node_update_cluster_status_updated(self): - cluster = db_api.cluster_get(self.ctx, self.cluster.id) - self.assertEqual('INIT', cluster.status) - - node = shared.create_node(self.ctx, self.cluster, self.profile) - - new_attributes = { - 'name': 'new_name', - 'status': 'ERROR', - 'status_reason': 'Something is wrong', - } - - db_api.node_update(self.ctx, node.id, new_attributes) - - node = db_api.node_get(self.ctx, node.id) - self.assertEqual('new_name', node.name) - self.assertEqual('ERROR', node.status) - self.assertEqual('Something is wrong', node.status_reason) - - cluster = db_api.cluster_get(self.ctx, self.cluster.id) - self.assertEqual('WARNING', cluster.status) - reason = 'Node new_name: Something is wrong' - self.assertEqual(reason, cluster.status_reason) - - def test_node_migrate_from_none(self): - node_orphan = shared.create_node(self.ctx, None, self.profile) - timestamp = tu.utcnow(True) - - node = db_api.node_migrate(self.ctx, node_orphan.id, self.cluster.id, - timestamp, 'NEW-ROLE') - cluster = db_api.cluster_get(self.ctx, self.cluster.id) - self.assertEqual(timestamp, node.updated_at) - self.assertEqual(self.cluster.id, node.cluster_id) - self.assertEqual(2, cluster.next_index) - nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id) - self.assertEqual(1, len(nodes)) - self.assertEqual('NEW-ROLE', nodes[0].role) - - def test_node_migrate_to_none(self): - node = shared.create_node(self.ctx, self.cluster, self.profile) - timestamp = tu.utcnow(True) - - node_new = 
db_api.node_migrate(self.ctx, node.id, None, timestamp) - self.assertEqual(timestamp, node_new.updated_at) - self.assertEqual('', node_new.cluster_id) - nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id) - self.assertEqual(0, len(nodes)) - - def test_node_migrate_between_clusters(self): - cluster1 = shared.create_cluster(self.ctx, self.profile) - cluster2 = shared.create_cluster(self.ctx, self.profile) - - node = shared.create_node(self.ctx, cluster1, self.profile) - nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(1, len(nodes)) - nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id) - self.assertEqual(0, len(nodes)) - # Refresh cluster1 and cluster2 - cluster1 = db_api.cluster_get(self.ctx, cluster1.id) - cluster2 = db_api.cluster_get(self.ctx, cluster2.id) - self.assertEqual(2, cluster1.next_index) - self.assertEqual(1, cluster2.next_index) - - timestamp = tu.utcnow(True) - - node_new = db_api.node_migrate(self.ctx, node.id, cluster2.id, - timestamp) - cluster1 = db_api.cluster_get(self.ctx, cluster1.id) - cluster2 = db_api.cluster_get(self.ctx, cluster2.id) - self.assertEqual(timestamp, node_new.updated_at) - self.assertEqual(cluster2.id, node_new.cluster_id) - self.assertIsNone(node_new.role) - nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(0, len(nodes)) - nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id) - self.assertEqual(1, len(nodes)) - self.assertEqual(2, cluster1.next_index) - self.assertEqual(2, cluster2.next_index) - - # Migrate it back! - timestamp = tu.utcnow(True) - - node_new = db_api.node_migrate(self.ctx, node.id, cluster1.id, - timestamp, 'FAKE-ROLE') - cluster1 = db_api.cluster_get(self.ctx, cluster1.id) - cluster2 = db_api.cluster_get(self.ctx, cluster2.id) - self.assertEqual(timestamp, node_new.updated_at) - self.assertEqual(cluster1.id, node_new.cluster_id) - self.assertEqual('FAKE-ROLE', node_new.role) - nodes = db_api.node_get_all_by_cluster(self.ctx, cluster1.id) - self.assertEqual(1, len(nodes)) - nodes = db_api.node_get_all_by_cluster(self.ctx, cluster2.id) - self.assertEqual(0, len(nodes)) - self.assertEqual(3, cluster1.next_index) - self.assertEqual(2, cluster2.next_index) - - def test_node_delete(self): - node = shared.create_node(self.ctx, self.cluster, self.profile) - node_id = node.id - - nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id) - self.assertEqual(1, len(nodes)) - - db_api.node_delete(self.ctx, node_id) - res = db_api.node_get(self.ctx, node_id) - self.assertIsNone(res) - - nodes = db_api.node_get_all_by_cluster(self.ctx, self.cluster.id) - self.assertEqual(0, len(nodes)) - - def test_node_delete_not_found(self): - node_id = 'BogusNodeID' - res = db_api.node_delete(self.ctx, node_id) - self.assertIsNone(res) - - res = db_api.node_get(self.ctx, node_id) - self.assertIsNone(res) diff --git a/senlin/tests/unit/db/test_policy_api.py b/senlin/tests/unit/db/test_policy_api.py deleted file mode 100644 index 4d5a2782c..000000000 --- a/senlin/tests/unit/db/test_policy_api.py +++ /dev/null @@ -1,413 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_db.sqlalchemy import utils as sa_utils -from oslo_utils import timeutils as tu - -from senlin.common import consts -from senlin.common import exception -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - -sample_spec = { - 'min_size': 1, - 'max_size': 10, - 'pause_time': 'PT10M', -} - - -class DBAPIPolicyTest(base.SenlinTestCase): - def setUp(self): - super(DBAPIPolicyTest, self).setUp() - self.ctx = utils.dummy_context() - self.profile = shared.create_profile(self.ctx) - self.cluster = shared.create_cluster(self.ctx, self.profile) - - def new_policy_data(self, **kwargs): - data = { - 'name': 'test_policy', - 'type': 'ScalingPolicy', - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - 'spec': sample_spec, - 'data': None, - } - - data.update(kwargs) - return data - - def test_policy_create(self): - data = self.new_policy_data() - policy = db_api.policy_create(self.ctx, data) - - self.assertIsNotNone(policy) - self.assertEqual(data['name'], policy.name) - self.assertEqual(data['type'], policy.type) - self.assertEqual(data['spec'], policy.spec) - self.assertEqual(10, policy.spec['max_size']) - self.assertIsNone(policy.data) - - def test_policy_get(self): - data = self.new_policy_data() - policy = db_api.policy_create(self.ctx, data) - - retobj = db_api.policy_get(self.ctx, policy.id) - self.assertIsNotNone(retobj) - self.assertEqual(data['name'], retobj.name) - self.assertEqual(data['type'], retobj.type) - self.assertEqual(data['spec'], retobj.spec) - self.assertEqual(10, retobj.spec['max_size']) - self.assertIsNone(retobj.data) - - def test_policy_get_diff_project(self): - data = self.new_policy_data() - policy = db_api.policy_create(self.ctx, data) - - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.policy_get(new_ctx, policy.id) - self.assertIsNone(res) - res = db_api.policy_get(new_ctx, policy.id, project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(policy.id, res.id) - - def test_policy_get_admin_context(self): - data = self.new_policy_data() - policy = db_api.policy_create(self.ctx, data) - - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - res = db_api.policy_get(admin_ctx, policy.id, project_safe=True) - self.assertIsNotNone(res) - - res = db_api.policy_get(admin_ctx, policy.id, project_safe=False) - self.assertIsNotNone(res) - - def test_policy_get_not_found(self): - retobj = db_api.policy_get(self.ctx, 'BogusID') - self.assertIsNone(retobj) - - def test_policy_get_by_name(self): - policy_name = 'my_best_policy' - data = self.new_policy_data(name=policy_name) - - # before creation - policy = db_api.policy_get_by_name(self.ctx, policy_name) - self.assertIsNone(policy) - - policy = db_api.policy_create(self.ctx, data) - - # after creation - retobj = db_api.policy_get_by_name(self.ctx, policy_name) - self.assertIsNotNone(retobj) - self.assertEqual(policy_name, retobj.name) - - # bad name - retobj 
= db_api.policy_get_by_name(self.ctx, 'non-exist') - self.assertIsNone(retobj) - - # duplicated name - db_api.policy_create(self.ctx, data) - self.assertRaises(exception.MultipleChoices, - db_api.policy_get_by_name, - self.ctx, policy_name) - - def test_policy_get_by_name_diff_project(self): - policy_name = 'my_best_policy' - data = self.new_policy_data(name=policy_name) - policy = db_api.policy_create(self.ctx, data) - - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.policy_get_by_name(new_ctx, policy_name) - self.assertIsNone(res) - res = db_api.policy_get_by_name(new_ctx, policy_name, - project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(policy.id, res.id) - - def test_policy_get_by_short_id(self): - policy_ids = ['same-part-unique-part', - 'same-part-part-unique'] - - for pid in policy_ids: - data = self.new_policy_data(id=pid) - db_api.policy_create(self.ctx, data) - - # verify creation with set ID - policy = db_api.policy_get(self.ctx, pid) - self.assertIsNotNone(policy) - self.assertEqual(pid, policy.id) - - # too short -> multiple choices - for x in range(len('same-part-')): - self.assertRaises(exception.MultipleChoices, - db_api.policy_get_by_short_id, - self.ctx, policy_ids[0][:x]) - - # ids are unique - policy = db_api.policy_get_by_short_id(self.ctx, policy_ids[0][:11]) - self.assertEqual(policy_ids[0], policy.id) - policy = db_api.policy_get_by_short_id(self.ctx, policy_ids[1][:11]) - self.assertEqual(policy_ids[1], policy.id) - - # bad ids - res = db_api.policy_get_by_short_id(self.ctx, 'non-existent') - self.assertIsNone(res) - - def test_policy_get_by_short_id_diff_project(self): - policy_id = 'same-part-unique-part' - data = self.new_policy_data(id=policy_id) - db_api.policy_create(self.ctx, data) - - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.policy_get_by_short_id(new_ctx, policy_id[0][:11]) - self.assertIsNone(res) - res = db_api.policy_get_by_short_id(new_ctx, policy_id[0][:11], - project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(policy_id, res.id) - - def test_policy_get_all(self): - specs = [ - {'name': 'policy_short', 'cooldown': '10'}, - {'name': 'policy_long', 'cooldown': '100'}, - ] - - for spec in specs: - data = self.new_policy_data(**spec) - db_api.policy_create(self.ctx, data) - - policies = db_api.policy_get_all(self.ctx) - self.assertEqual(2, len(policies)) - names = [p.name for p in policies] - for spec in specs: - self.assertIn(spec['name'], names) - - db_api.policy_delete(self.ctx, policies[1].id) - - # after delete one of them - policies = db_api.policy_get_all(self.ctx) - self.assertEqual(1, len(policies)) - - # after delete both policies - db_api.policy_delete(self.ctx, policies[0].id) - - policies = db_api.policy_get_all(self.ctx) - self.assertEqual(0, len(policies)) - - def test_policy_get_all_diff_project(self): - specs = [ - {'name': 'policy_short', 'cooldown': '10'}, - {'name': 'policy_long', 'cooldown': '100'}, - ] - - for spec in specs: - data = self.new_policy_data(**spec) - db_api.policy_create(self.ctx, data) - - new_ctx = utils.dummy_context(project='a-different-project') - policies = db_api.policy_get_all(new_ctx) - self.assertEqual(0, len(policies)) - policies = db_api.policy_get_all(new_ctx, project_safe=False) - self.assertEqual(2, len(policies)) - - def test_policy_get_all_admin_context(self): - specs = [ - {'name': 'policy_short', 'cooldown': '10'}, - {'name': 'policy_long', 'cooldown': '100'}, - ] - - for spec in specs: - data = 
self.new_policy_data(**spec) - db_api.policy_create(self.ctx, data) - - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - policies = db_api.policy_get_all(admin_ctx, project_safe=True) - self.assertEqual(2, len(policies)) - - policies = db_api.policy_get_all(admin_ctx, project_safe=False) - self.assertEqual(2, len(policies)) - - def test_policy_get_all_with_limit_marker(self): - ids = ['policy1', 'policy2', 'policy3'] - for pid in ids: - timestamp = tu.utcnow(True) - data = self.new_policy_data(id=pid, created_at=timestamp) - db_api.policy_create(self.ctx, data) - - # different limit settings - policies = db_api.policy_get_all(self.ctx, limit=1) - self.assertEqual(1, len(policies)) - - policies = db_api.policy_get_all(self.ctx, limit=2) - self.assertEqual(2, len(policies)) - - # a large limit - policies = db_api.policy_get_all(self.ctx, limit=5) - self.assertEqual(3, len(policies)) - - # use marker here - policies = db_api.policy_get_all(self.ctx, marker='policy1') - self.assertEqual(2, len(policies)) - - policies = db_api.policy_get_all(self.ctx, marker='policy2') - self.assertEqual(1, len(policies)) - - policies = db_api.policy_get_all(self.ctx, marker='policy3') - self.assertEqual(0, len(policies)) - - policies = db_api.policy_get_all(self.ctx, limit=1, marker='policy1') - self.assertEqual(1, len(policies)) - - @mock.patch.object(sa_utils, 'paginate_query') - def test_policy_get_all_used_sort_keys(self, mock_paginate): - ids = ['policy1', 'policy2', 'policy3'] - for pid in ids: - data = self.new_policy_data(id=pid) - db_api.policy_create(self.ctx, data) - - sort_keys = consts.POLICY_SORT_KEYS - db_api.policy_get_all(self.ctx, sort=','.join(sort_keys)) - - args = mock_paginate.call_args[0] - used_sort_keys = set(args[3]) - sort_keys.append('id') - expected_keys = set(sort_keys) - self.assertEqual(expected_keys, used_sort_keys) - - def test_policy_get_all_sorting(self): - values = [{'id': '001', 'name': 'policy1'}, - {'id': '002', 'name': 'policy3'}, - {'id': '003', 'name': 'policy2'}] - - for v in values: - v['created_at'] = tu.utcnow(True) - data = self.new_policy_data(**v) - db_api.policy_create(self.ctx, data) - - # Sorted by name - policies = db_api.policy_get_all(self.ctx, sort='name') - self.assertEqual(3, len(policies)) - self.assertEqual('001', policies[0].id) - self.assertEqual('003', policies[1].id) - self.assertEqual('002', policies[2].id) - - # Sorted by created_at and name (ascending) - policies = db_api.policy_get_all(self.ctx, sort='created_at,name') - self.assertEqual(3, len(policies)) - self.assertEqual('001', policies[0].id) - self.assertEqual('002', policies[1].id) - self.assertEqual('003', policies[2].id) - - # Sorted by name (descending) - policies = db_api.policy_get_all(self.ctx, sort='name:desc') - self.assertEqual(3, len(policies)) - self.assertEqual('002', policies[0].id) - self.assertEqual('003', policies[1].id) - self.assertEqual('001', policies[2].id) - - def test_policy_get_all_default_sorting(self): - policies = [] - for x in range(3): - data = self.new_policy_data(created_at=tu.utcnow(True)) - policies.append(db_api.policy_create(self.ctx, data)) - - results = db_api.policy_get_all(self.ctx) - self.assertEqual(3, len(results)) - self.assertEqual(policies[0].id, results[0].id) - self.assertEqual(policies[1].id, results[1].id) - self.assertEqual(policies[2].id, results[2].id) - - def test_policy_get_all_with_filters(self): - for name in ['policy1', 'policy2']: - data = self.new_policy_data(name=name) - 
db_api.policy_create(self.ctx, data) - - filters = {'name': ['policy1', 'policyx']} - results = db_api.policy_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('policy1', results[0]['name']) - - filters = {'name': 'policy1'} - results = db_api.policy_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('policy1', results[0]['name']) - - def test_policy_get_all_with_empty_filters(self): - for name in ['policy1', 'policy2']: - data = self.new_policy_data(name=name) - db_api.policy_create(self.ctx, data) - - filters = None - results = db_api.policy_get_all(self.ctx, filters=filters) - self.assertEqual(2, len(results)) - - def test_policy_update(self): - another_policy = { - 'name': 'new_scaling_policy', - 'type': 'ScalingPolicy', - 'spec': { - 'min_size': 5, - 'max_size': 15, - } - } - old_data = self.new_policy_data() - old_policy = db_api.policy_create(self.ctx, old_data) - - new_data = self.new_policy_data(**another_policy) - new_policy = db_api.policy_update(self.ctx, old_policy.id, new_data) - - self.assertEqual(old_policy.id, new_policy.id) - self.assertEqual(new_data['name'], new_policy.name) - self.assertEqual('new_scaling_policy', new_policy.name) - - def test_policy_update_not_found(self): - self.assertRaises(exception.ResourceNotFound, - db_api.policy_update, - self.ctx, 'BogusID', {}) - - def test_policy_delete(self): - policy = db_api.policy_create(self.ctx, self.new_policy_data()) - self.assertIsNotNone(policy) - - policy_id = policy.id - db_api.policy_delete(self.ctx, policy_id) - - policy = db_api.policy_get(self.ctx, policy_id) - self.assertIsNone(policy) - - # not found in delete is okay - res = db_api.policy_delete(self.ctx, policy_id) - self.assertIsNone(res) - - def test_policy_delete_in_use(self): - policy = db_api.policy_create(self.ctx, self.new_policy_data()) - self.assertIsNotNone(policy) - - fields = { - 'enabled': True, - } - db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, - fields) - self.assertRaises(exception.EResourceBusy, - db_api.policy_delete, - self.ctx, policy.id) - - db_api.cluster_policy_detach(self.ctx, self.cluster.id, policy.id) - db_api.policy_delete(self.ctx, policy.id) - policy = db_api.policy_get(self.ctx, policy.id) - self.assertIsNone(policy) diff --git a/senlin/tests/unit/db/test_profile_api.py b/senlin/tests/unit/db/test_profile_api.py deleted file mode 100644 index 9c0b66347..000000000 --- a/senlin/tests/unit/db/test_profile_api.py +++ /dev/null @@ -1,370 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
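The node, policy, and profile tests all exercise the same short-id contract: a prefix that matches more than one row raises MultipleChoices, a unique prefix resolves to its row, and no match returns None. A toy version of that lookup, illustrative only; the real one is a filtered database query.

# Toy version of the short-id prefix lookup contract tested for nodes,
# policies and profiles alike; the real lookup is a filtered DB query.
class MultipleChoices(Exception):
    pass


def get_by_short_id(rows, short_id):
    matches = [r for r in rows if r.startswith(short_id)]
    if len(matches) > 1:
        raise MultipleChoices(short_id)
    return matches[0] if matches else None


ids = ['same-part-unique-part', 'same-part-part-unique']
assert get_by_short_id(ids, 'same-part-u') == 'same-part-unique-part'
assert get_by_short_id(ids, 'non-existent') is None
try:
    get_by_short_id(ids, 'same-part-')
except MultipleChoices:
    pass  # ambiguous prefix: the caller must disambiguate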
- -from unittest import mock - -from oslo_db.sqlalchemy import utils as sa_utils -from oslo_utils import timeutils as tu - -from senlin.common import consts -from senlin.common import exception -from senlin.db.sqlalchemy import api as db_api -from senlin.engine import parser -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit.db import shared - - -class DBAPIProfileTest(base.SenlinTestCase): - def setUp(self): - super(DBAPIProfileTest, self).setUp() - self.ctx = utils.dummy_context() - - def test_profile_create(self): - data = parser.simple_parse(shared.sample_profile) - profile = shared.create_profile(self.ctx) - self.assertIsNotNone(profile.id) - self.assertEqual(data['name'], profile.name) - self.assertEqual(data['type'], profile.type) - self.assertEqual(data['spec'], profile.spec) - - def test_profile_get(self): - profile = shared.create_profile(self.ctx) - retobj = db_api.profile_get(self.ctx, profile.id) - self.assertEqual(profile.id, retobj.id) - self.assertEqual(profile.spec, retobj.spec) - - def test_profile_get_diff_project(self): - profile = shared.create_profile(self.ctx) - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.profile_get(new_ctx, profile.id) - self.assertIsNone(res) - - res = db_api.profile_get(new_ctx, profile.id, project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(profile.id, res.id) - - def test_profile_get_admin_context(self): - profile = shared.create_profile(self.ctx) - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - res = db_api.profile_get(admin_ctx, profile.id, project_safe=True) - self.assertIsNotNone(res) - - res = db_api.profile_get(admin_ctx, profile.id, project_safe=False) - self.assertIsNotNone(res) - - def test_profile_get_not_found(self): - profile = db_api.profile_get(self.ctx, 'BogusProfileID') - self.assertIsNone(profile) - - def test_profile_get_by_name(self): - profile_name = 'my_best_profile' - - # before creation - profile = db_api.profile_get_by_name(self.ctx, profile_name) - self.assertIsNone(profile) - - profile = shared.create_profile(self.ctx, name=profile_name) - - # after creation - retobj = db_api.profile_get_by_name(self.ctx, profile_name) - self.assertIsNotNone(retobj) - self.assertEqual(profile_name, retobj.name) - - # bad name - retobj = db_api.profile_get_by_name(self.ctx, 'non-exist') - self.assertIsNone(retobj) - - # duplicated name - shared.create_profile(self.ctx, name=profile_name) - self.assertRaises(exception.MultipleChoices, - db_api.profile_get_by_name, - self.ctx, profile_name) - - def test_profile_get_by_name_diff_project(self): - profile_name = 'my_best_profile' - shared.create_profile(self.ctx, name=profile_name) - - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.profile_get_by_name(new_ctx, profile_name) - self.assertIsNone(res) - - res = db_api.profile_get_by_name(new_ctx, profile_name, - project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(profile_name, res.name) - - def test_profile_get_by_short_id(self): - profile_ids = ['same-part-unique-part', - 'same-part-part-unique'] - - for pid in profile_ids: - shared.create_profile(self.ctx, id=pid) - - # verify creation with set ID - profile = db_api.profile_get(self.ctx, pid) - self.assertIsNotNone(profile) - self.assertEqual(pid, profile.id) - - # too short -> multiple choices - for x in range(len('same-part-')): - self.assertRaises(exception.MultipleChoices, - db_api.profile_get_by_short_id, 
- self.ctx, profile_ids[0][:x]) - - # ids are unique - profile = db_api.profile_get_by_short_id(self.ctx, profile_ids[0][:11]) - self.assertEqual(profile_ids[0], profile.id) - profile = db_api.profile_get_by_short_id(self.ctx, profile_ids[1][:11]) - self.assertEqual(profile_ids[1], profile.id) - - # bad ids - res = db_api.profile_get_by_short_id(self.ctx, 'non-existent') - self.assertIsNone(res) - - def test_profile_get_by_short_id_diff_project(self): - profile_id = 'same-part-unique-part' - shared.create_profile(self.ctx, id=profile_id) - - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.profile_get_by_short_id(new_ctx, profile_id) - self.assertIsNone(res) - - res = db_api.profile_get_by_short_id(new_ctx, profile_id, - project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(profile_id, res.id) - - def test_profile_get_all(self): - ids = ['profile1', 'profile2'] - - for pid in ids: - shared.create_profile(self.ctx, id=pid) - - profiles = db_api.profile_get_all(self.ctx) - self.assertEqual(2, len(profiles)) - profile_ids = [p.id for p in profiles] - for pid in ids: - self.assertIn(pid, profile_ids) - - db_api.profile_delete(self.ctx, profiles[1].id) - - # after delete one of them - profiles = db_api.profile_get_all(self.ctx) - self.assertEqual(1, len(profiles)) - - # after delete both profiles - db_api.profile_delete(self.ctx, profiles[0].id) - - profiles = db_api.profile_get_all(self.ctx) - self.assertEqual(0, len(profiles)) - - def test_profile_get_all_diff_project(self): - ids = ['profile1', 'profile2'] - for pid in ids: - shared.create_profile(self.ctx, id=pid) - - new_ctx = utils.dummy_context(project='a-different-project') - profiles = db_api.profile_get_all(new_ctx) - self.assertEqual(0, len(profiles)) - profiles = db_api.profile_get_all(new_ctx, project_safe=False) - self.assertEqual(2, len(profiles)) - - def test_profile_get_all_admin_context(self): - ids = ['profile1', 'profile2'] - for pid in ids: - shared.create_profile(self.ctx, id=pid) - - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - profiles = db_api.profile_get_all(admin_ctx, project_safe=True) - self.assertEqual(2, len(profiles)) - - profiles = db_api.profile_get_all(admin_ctx, project_safe=False) - self.assertEqual(2, len(profiles)) - - def test_profile_get_all_with_limit_marker(self): - ids = ['profile1', 'profile2', 'profile3'] - for pid in ids: - timestamp = tu.utcnow(True) - shared.create_profile(self.ctx, id=pid, created_at=timestamp) - - # different limit settings - profiles = db_api.profile_get_all(self.ctx, limit=1) - self.assertEqual(1, len(profiles)) - - profiles = db_api.profile_get_all(self.ctx, limit=2) - self.assertEqual(2, len(profiles)) - - # a large limit - profiles = db_api.profile_get_all(self.ctx, limit=5) - self.assertEqual(3, len(profiles)) - - # use marker here - profiles = db_api.profile_get_all(self.ctx, marker='profile1') - self.assertEqual(2, len(profiles)) - - profiles = db_api.profile_get_all(self.ctx, marker='profile2') - self.assertEqual(1, len(profiles)) - - profiles = db_api.profile_get_all(self.ctx, marker='profile3') - self.assertEqual(0, len(profiles)) - - profiles = db_api.profile_get_all(self.ctx, limit=1, marker='profile1') - self.assertEqual(1, len(profiles)) - - @mock.patch.object(sa_utils, 'paginate_query') - def test_profile_get_all_used_sort_keys(self, mock_paginate): - ids = ['profile1', 'profile2', 'profile3'] - for pid in ids: - shared.create_profile(self.ctx, id=pid) - - sort_keys = 
consts.PROFILE_SORT_KEYS - db_api.profile_get_all(self.ctx, sort=','.join(sort_keys)) - - args = mock_paginate.call_args[0] - sort_keys.append('id') - self.assertEqual(set(sort_keys), set(args[3])) - - def test_profile_get_all_sorting(self): - values = [{'id': '001', 'name': 'profile1', 'type': 'C'}, - {'id': '002', 'name': 'profile3', 'type': 'B'}, - {'id': '003', 'name': 'profile2', 'type': 'A'}] - - for v in values: - shared.create_profile(self.ctx, **v) - - # Sorted by name,type - profiles = db_api.profile_get_all(self.ctx, sort='name,type') - self.assertEqual(3, len(profiles)) - self.assertEqual('001', profiles[0].id) - self.assertEqual('003', profiles[1].id) - self.assertEqual('002', profiles[2].id) - - # Sorted by type,name (ascending) - profiles = db_api.profile_get_all(self.ctx, sort='type,name') - self.assertEqual(3, len(profiles)) - self.assertEqual('003', profiles[0].id) - self.assertEqual('002', profiles[1].id) - self.assertEqual('001', profiles[2].id) - - # Sorted by type,name (descending) - profiles = db_api.profile_get_all(self.ctx, sort='type:desc,name:desc') - self.assertEqual(3, len(profiles)) - self.assertEqual('001', profiles[0].id) - self.assertEqual('002', profiles[1].id) - self.assertEqual('003', profiles[2].id) - - def test_profile_get_all_default_sorting(self): - profiles = [] - for x in range(3): - profile = shared.create_profile(self.ctx, - created_at=tu.utcnow(True)) - profiles.append(profile) - - results = db_api.profile_get_all(self.ctx) - self.assertEqual(3, len(results)) - self.assertEqual(profiles[0].id, results[0].id) - self.assertEqual(profiles[1].id, results[1].id) - self.assertEqual(profiles[2].id, results[2].id) - - def test_profile_get_all_with_filters(self): - for name in ['profile1', 'profile2']: - shared.create_profile(self.ctx, name=name) - - filters = {'name': ['profile1', 'profilex']} - results = db_api.profile_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('profile1', results[0]['name']) - - filters = {'name': 'profile1'} - results = db_api.profile_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('profile1', results[0]['name']) - - def test_profile_get_all_with_empty_filters(self): - for name in ['profile1', 'profile2']: - shared.create_profile(self.ctx, name=name) - - filters = None - results = db_api.profile_get_all(self.ctx, filters=filters) - self.assertEqual(2, len(results)) - - def test_profile_update(self): - new_fields = { - 'name': 'test_profile_name_2', - 'type': 'my_test_profile_type', - 'spec': { - 'template': { - 'heat_template_version': '2013-05-23', - 'resources': { - 'myrandom': 'OS::Heat::RandomString', - }, - }, - 'files': { - 'myfile': 'new contents', - }, - }, - } - - old_profile = shared.create_profile(self.ctx) - new_profile = db_api.profile_update(self.ctx, old_profile.id, - new_fields) - - self.assertEqual(old_profile.id, new_profile.id) - self.assertEqual(new_fields['name'], new_profile.name) - self.assertEqual('test_profile_name_2', new_profile.name) - - def test_profile_update_not_found(self): - self.assertRaises(exception.ResourceNotFound, - db_api.profile_update, - self.ctx, 'BogusID', {}) - - def test_profile_delete(self): - profile = shared.create_profile(self.ctx) - self.assertIsNotNone(profile) - profile_id = profile.id - db_api.profile_delete(self.ctx, profile_id) - - profile = db_api.profile_get(self.ctx, profile_id) - self.assertIsNone(profile) - - # not found in delete is okay - res = db_api.profile_delete(self.ctx, 
profile_id) - self.assertIsNone(res) - - def test_profile_delete_profile_used_by_cluster(self): - profile = shared.create_profile(self.ctx) - cluster = shared.create_cluster(self.ctx, profile) - - profile_id = profile.id - ex = self.assertRaises(exception.EResourceBusy, - db_api.profile_delete, self.ctx, profile_id) - self.assertEqual("The profile '%s' is busy now." % profile_id, - str(ex)) - - db_api.cluster_delete(self.ctx, cluster.id) - db_api.profile_delete(self.ctx, profile_id) - - def test_profile_delete_profile_used_by_node(self): - profile = shared.create_profile(self.ctx) - node = shared.create_node(self.ctx, None, profile) - - profile_id = profile.id - ex = self.assertRaises(exception.EResourceBusy, - db_api.profile_delete, self.ctx, profile_id) - self.assertEqual("The profile '%s' is busy now." % profile_id, - str(ex)) - - db_api.node_delete(self.ctx, node.id) - db_api.profile_delete(self.ctx, profile_id) diff --git a/senlin/tests/unit/db/test_receiver_api.py b/senlin/tests/unit/db/test_receiver_api.py deleted file mode 100644 index 923dd8320..000000000 --- a/senlin/tests/unit/db/test_receiver_api.py +++ /dev/null @@ -1,345 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_db.sqlalchemy import utils as sa_utils -from oslo_utils import timeutils as tu - -from senlin.common import consts -from senlin.common import exception -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class DBAPIReceiverTest(base.SenlinTestCase): - def setUp(self): - super(DBAPIReceiverTest, self).setUp() - self.ctx = utils.dummy_context() - self.type = 'webhook' - self.cluster_id = 'FAKE_ID' - self.action = 'test_action' - - def _create_receiver(self, ctx, type=None, cluster_id=None, action=None, - **kwargs): - values = { - 'name': 'test_receiver', - 'type': type or self.type, - 'user': ctx.user_id, - 'project': ctx.project_id, - 'domain': ctx.domain_id, - 'created_at': None, - 'updated_at': None, - 'cluster_id': cluster_id or self.cluster_id, - 'action': action or self.action, - 'actor': {'username': 'john', 'password': 'secrete1'}, - 'params': {'key1': 'value1'}, - 'channel': {'alarm_url': 'http://url1'} - } - values.update(kwargs) - return db_api.receiver_create(ctx, values) - - def test_receiver_create_and_get(self): - res = self._create_receiver(self.ctx) - r = db_api.receiver_get(self.ctx, res.id) - self.assertIsNotNone(r) - self.assertEqual(self.cluster_id, r.cluster_id) - self.assertEqual('test_receiver', r.name) - self.assertEqual(self.type, r.type) - self.assertEqual(self.ctx.user_id, r.user) - self.assertEqual(self.ctx.project_id, r.project) - self.assertEqual(self.ctx.domain_id, r.domain) - self.assertIsNone(r.created_at) - self.assertIsNone(r.updated_at) - self.assertEqual(self.action, r.action) - self.assertEqual({'username': 'john', 'password': 'secrete1'}, r.actor) - self.assertEqual({'key1': 'value1'}, r.params) - self.assertEqual({'alarm_url': 'http://url1'}, 
r.channel) - - def test_receiver_get_diff_project(self): - new_ctx = utils.dummy_context(project='a-different-project') - r = self._create_receiver(self.ctx) - - res = db_api.receiver_get(new_ctx, r.id) - self.assertIsNone(res) - - res = db_api.receiver_get(new_ctx, r.id, project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(r.id, res.id) - - res = db_api.receiver_get(self.ctx, r.id) - self.assertEqual(r.id, res.id) - - def test_receiver_get_admin_context(self): - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - r = self._create_receiver(self.ctx) - - res = db_api.receiver_get(admin_ctx, r.id, project_safe=True) - self.assertIsNotNone(res) - - res = db_api.receiver_get(admin_ctx, r.id, project_safe=False) - self.assertIsNotNone(res) - - def test_receiver_get_by_short_id(self): - receiver_id1 = 'same-part-unique-part' - receiver_id2 = 'same-part-part-unique' - self._create_receiver(self.ctx, id=receiver_id1, name='receiver-1') - self._create_receiver(self.ctx, id=receiver_id2, name='receiver-2') - - for x in range(len('same-part-')): - self.assertRaises(exception.MultipleChoices, - db_api.receiver_get_by_short_id, - self.ctx, receiver_id1[:x]) - - res = db_api.receiver_get_by_short_id(self.ctx, receiver_id1[:11]) - self.assertEqual(receiver_id1, res.id) - res = db_api.receiver_get_by_short_id(self.ctx, receiver_id2[:11]) - self.assertEqual(receiver_id2, res.id) - res = db_api.receiver_get_by_short_id(self.ctx, 'non-existent') - self.assertIsNone(res) - - def test_receiver_get_by_short_id_diff_project(self): - rid = 'same-part-unique-part' - self._create_receiver(self.ctx, id=rid, name='receiver-1') - - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.receiver_get_by_short_id(new_ctx, rid[:11]) - self.assertIsNone(res) - - res = db_api.receiver_get_by_short_id(new_ctx, rid[:11], - project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(rid, res.id) - - def test_receiver_get_by_name(self): - rname = 'fake_receiver_name' - self._create_receiver(self.ctx, name=rname) - receiver = db_api.receiver_get_by_name(self.ctx, rname) - self.assertIsNotNone(receiver) - self.assertEqual(rname, receiver.name) - - # bad name - res = db_api.receiver_get_by_name(self.ctx, 'BogusName') - self.assertIsNone(res) - - # duplicated name - self._create_receiver(self.ctx, name=rname) - self.assertRaises(exception.MultipleChoices, - db_api.receiver_get_by_name, - self.ctx, rname) - - def test_receiver_get_by_name_diff_project(self): - rname = 'fake_receiver_name' - self._create_receiver(self.ctx, name=rname) - - new_ctx = utils.dummy_context(project='a-different-project') - res = db_api.receiver_get_by_name(new_ctx, rname) - self.assertIsNone(res) - - res = db_api.receiver_get_by_name(new_ctx, rname, project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(rname, res.name) - - def test_receiver_get_all(self): - values = [{'name': 'receiver1'}, - {'name': 'receiver2'}, - {'name': 'receiver3'}] - [self._create_receiver(self.ctx, **v) for v in values] - - receivers = db_api.receiver_get_all(self.ctx) - self.assertEqual(3, len(receivers)) - - names = [receiver.name for receiver in receivers] - for val in values: - self.assertIn(val['name'], names) - - def test_receiver_get_all_with_limit_marker(self): - receiver_ids = ['receiver1', 'receiver2', 'receiver3'] - for v in receiver_ids: - self._create_receiver(self.ctx, id=v, - created_at=tu.utcnow(True)) - - receivers = db_api.receiver_get_all(self.ctx, limit=1) - self.assertEqual(1, 
len(receivers)) - - receivers = db_api.receiver_get_all(self.ctx, limit=2) - self.assertEqual(2, len(receivers)) - - receivers = db_api.receiver_get_all(self.ctx, limit=5) - self.assertEqual(3, len(receivers)) - - receivers = db_api.receiver_get_all(self.ctx, marker='receiver1') - self.assertEqual(2, len(receivers)) - - receivers = db_api.receiver_get_all(self.ctx, marker='receiver2') - self.assertEqual(1, len(receivers)) - - receivers = db_api.receiver_get_all(self.ctx, marker='receiver3') - self.assertEqual(0, len(receivers)) - - receivers = db_api.receiver_get_all(self.ctx, limit=1, - marker='receiver1') - self.assertEqual(1, len(receivers)) - - @mock.patch.object(sa_utils, 'paginate_query') - def test_receiver_get_all_used_sort_keys(self, mock_paginate): - receiver_ids = ['receiver1', 'receiver2', 'receiver3'] - for v in receiver_ids: - self._create_receiver(self.ctx, id=v) - - sort_keys = consts.RECEIVER_SORT_KEYS - - db_api.receiver_get_all(self.ctx, sort=','.join(sort_keys)) - args = mock_paginate.call_args[0] - sort_keys.append('id') - self.assertEqual(set(sort_keys), set(args[3])) - - def test_receiver_get_all_sorting(self): - values = [{'id': '001', 'name': 'receiver1'}, - {'id': '002', 'name': 'receiver3'}, - {'id': '003', 'name': 'receiver2'}] - obj_ids = {'receiver1': 'id3', - 'receiver2': 'id2', - 'receiver3': 'id1'} - for v in values: - self._create_receiver(self.ctx, cluster_id=obj_ids[v['name']], **v) - - receivers = db_api.receiver_get_all(self.ctx, sort='name,cluster_id') - self.assertEqual(3, len(receivers)) - # Sorted by name (ascending) - self.assertEqual('001', receivers[0].id) - self.assertEqual('003', receivers[1].id) - self.assertEqual('002', receivers[2].id) - - receivers = db_api.receiver_get_all(self.ctx, sort='cluster_id,name') - self.assertEqual(3, len(receivers)) - # Sorted by obj_id (ascending) - self.assertEqual('002', receivers[0].id) - self.assertEqual('003', receivers[1].id) - self.assertEqual('001', receivers[2].id) - - receivers = db_api.receiver_get_all(self.ctx, - sort='cluster_id:desc,name:desc') - self.assertEqual(3, len(receivers)) - # Sorted by obj_id (descending) - self.assertEqual('001', receivers[0].id) - self.assertEqual('003', receivers[1].id) - self.assertEqual('002', receivers[2].id) - - def test_receiver_get_all_sorting_default(self): - values = [{'id': '001', 'name': 'receiver1'}, - {'id': '002', 'name': 'receiver2'}, - {'id': '003', 'name': 'receiver3'}] - obj_ids = {'receiver1': 'id3', - 'receiver2': 'id2', - 'receiver3': 'id1'} - for v in values: - self._create_receiver(self.ctx, cluster_id=obj_ids[v['name']], **v) - - receivers = db_api.receiver_get_all(self.ctx) - self.assertEqual(3, len(receivers)) - self.assertEqual(values[0]['id'], receivers[0].id) - self.assertEqual(values[1]['id'], receivers[1].id) - self.assertEqual(values[2]['id'], receivers[2].id) - - def test_receiver_get_all_with_filters(self): - self._create_receiver(self.ctx, name='receiver1') - self._create_receiver(self.ctx, name='receiver2') - - filters = {'name': ['receiver1', 'receiverx']} - results = db_api.receiver_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('receiver1', results[0]['name']) - - filters = {'name': 'receiver1'} - results = db_api.receiver_get_all(self.ctx, filters=filters) - self.assertEqual(1, len(results)) - self.assertEqual('receiver1', results[0]['name']) - - def test_receiver_get_all_with_empty_filters(self): - self._create_receiver(self.ctx, name='receiver1') - self._create_receiver(self.ctx, 
name='receiver2') - - filters = None - results = db_api.receiver_get_all(self.ctx, filters=filters) - self.assertEqual(2, len(results)) - - def test_receiver_get_all_with_project_safe(self): - self._create_receiver(self.ctx, name='receiver1') - self._create_receiver(self.ctx, name='receiver2') - - self.ctx.project_id = 'a-different-project' - results = db_api.receiver_get_all(self.ctx, project_safe=False) - self.assertEqual(2, len(results)) - - self.ctx.project_id = 'a-different-project' - results = db_api.receiver_get_all(self.ctx) - self.assertEqual(0, len(results)) - - results = db_api.receiver_get_all(self.ctx, project_safe=True) - self.assertEqual(0, len(results)) - - def test_receiver_get_all_with_admin_context(self): - self._create_receiver(self.ctx, name='receiver1') - self._create_receiver(self.ctx, name='receiver2') - - admin_ctx = utils.dummy_context(project='a-different-project', - is_admin=True) - results = db_api.receiver_get_all(admin_ctx, project_safe=True) - self.assertEqual(2, len(results)) - - results = db_api.receiver_get_all(admin_ctx, project_safe=False) - self.assertEqual(2, len(results)) - - def test_receiver_delete(self): - res = self._create_receiver(self.ctx) - receiver_id = res.id - receiver = db_api.receiver_get(self.ctx, receiver_id) - self.assertIsNotNone(receiver) - - db_api.receiver_delete(self.ctx, receiver_id) - res = db_api.receiver_get(self.ctx, receiver_id) - self.assertIsNone(res) - - def test_receiver_delete_not_found(self): - receiver_id = 'BogusWebhookID' - res = db_api.receiver_delete(self.ctx, receiver_id) - self.assertIsNone(res) - - res = db_api.receiver_get(self.ctx, receiver_id) - self.assertIsNone(res) - - def test_receiver_update(self): - new_values = { - 'name': 'test_receiver2', - 'params': {'key2': 'value2'}, - } - - old_receiver = self._create_receiver(self.ctx) - new_receiver = db_api.receiver_update(self.ctx, old_receiver.id, - new_values) - - self.assertEqual(old_receiver.id, new_receiver.id) - self.assertEqual(new_values['name'], new_receiver.name) - self.assertEqual('test_receiver2', new_receiver.name) - self.assertEqual('value2', new_receiver.params['key2']) - - def test_receiver_update_not_found(self): - new_values = { - 'name': 'test_receiver2', - 'params': {'key2': 'value2'}, - } - self.assertRaises(exception.ResourceNotFound, - db_api.receiver_update, - self.ctx, 'BogusID', new_values) diff --git a/senlin/tests/unit/db/test_registry_api.py b/senlin/tests/unit/db/test_registry_api.py deleted file mode 100644 index f1f59c4f0..000000000 --- a/senlin/tests/unit/db/test_registry_api.py +++ /dev/null @@ -1,170 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
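[Editorial sketch.] The receiver cases above (like the profile cases before them) repeatedly pin down the project_safe contract: a plain context sees only rows in its own project, an admin context sees everything, and project_safe=False disables scoping entirely. A minimal sketch of that query pattern, assuming SQLAlchemy and purely illustrative model/context names rather than Senlin's actual DB-API code:

    # Hedged sketch of project-safe retrieval; `Receiver` and the ctx
    # attributes are stand-ins for the real model and RequestContext.
    from sqlalchemy import Column, String
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()


    class Receiver(Base):
        __tablename__ = 'receiver'
        id = Column(String(36), primary_key=True)
        project = Column(String(32))


    def receiver_get(session, ctx, receiver_id, project_safe=True):
        query = session.query(Receiver).filter_by(id=receiver_id)
        # Admin contexts bypass scoping; everyone else is confined to
        # their own project unless project_safe=False is passed.
        if project_safe and not ctx.is_admin:
            query = query.filter_by(project=ctx.project_id)
        return query.first()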
- -from unittest import mock - -from senlin.db.sqlalchemy import api as db_api -from senlin.db.sqlalchemy import utils as db_utils -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class DBAPIRegistryTest(base.SenlinTestCase): - def setUp(self): - super(DBAPIRegistryTest, self).setUp() - self.ctx = utils.dummy_context() - - db_api.service_create('SERVICE_ID') - - def _create_registry(self, cluster_id, check_type, interval, params, - engine_id): - return db_api.registry_create(self.ctx, cluster_id, check_type, - interval, params, engine_id) - - def test_registry_create_get(self): - registry = self._create_registry(cluster_id='CLUSTER_ID', - check_type='NODE_STATUS_POLLING', - interval=60, - params={}, - engine_id='ENGINE_ID') - ret_registries = db_api.registry_claim(self.ctx, registry.engine_id) - self.assertEqual(1, len(ret_registries)) - ret_registry = ret_registries[0] - self.assertEqual(registry.id, ret_registry.id) - self.assertEqual(registry.cluster_id, ret_registry.cluster_id) - self.assertEqual(registry.check_type, ret_registry.check_type) - self.assertEqual(registry.interval, ret_registry.interval) - self.assertEqual(registry.params, ret_registry.params) - self.assertEqual(registry.engine_id, ret_registry.engine_id) - - def test_registry_update(self): - self._create_registry(cluster_id='FAKE_ID', - check_type='NODE_STATUS_POLLING', - interval=60, - params={}, - engine_id='DEAD_ENGINE') - - registries = db_api.registry_claim(self.ctx, engine_id='ENGINE_ID') - self.assertTrue(registries[0].enabled) - - db_api.registry_update(self.ctx, 'FAKE_ID', {'enabled': False}) - registries = db_api.registry_claim(self.ctx, engine_id='NEW_ENGINE_ID') - self.assertFalse(registries[0].enabled) - - def test_registry_claim(self): - for i in range(2): - cluster_id = 'cluster-%s' % i - self._create_registry(cluster_id=cluster_id, - check_type='NODE_STATUS_POLLING', - interval=60, - params={}, - engine_id='DEAD_ENGINE') - - registries = db_api.registry_claim(self.ctx, engine_id='ENGINE_ID') - self.assertEqual(2, len(registries)) - self.assertEqual('DEAD_ENGINE', registries[0].engine_id) - self.assertEqual('DEAD_ENGINE', registries[1].engine_id) - - @mock.patch.object(db_utils, 'is_service_dead') - def test_registry_claim_with_dead_engine(self, mock_check): - db_api.service_create('SERVICE_ID_DEAD') - self._create_registry( - cluster_id='CLUSTER_1', check_type='NODE_STATUS_POLLING', - interval=60, params={}, engine_id='SERVICE_ID') - self._create_registry( - cluster_id='CLUSTER_1', check_type='NODE_STATUS_POLLING', - interval=60, params={}, engine_id='SERVICE_ID_DEAD') - - mock_check.side_effect = [False, True] - - registries = db_api.registry_claim(self.ctx, engine_id='ENGINE_ID') - - self.assertEqual(1, len(registries)) - self.assertEqual('SERVICE_ID_DEAD', registries[0].engine_id) - - def test_registry_delete(self): - registry = self._create_registry('CLUSTER_ID', - check_type='NODE_STATUS_POLLING', - interval=60, - params={}, - engine_id='ENGINE_ID') - db_api.registry_delete(self.ctx, 'CLUSTER_ID') - self.assertEqual([], db_api.registry_claim(self.ctx, - registry.engine_id)) - - def test_registry_get(self): - obj = self._create_registry(cluster_id='FAKE_ID', - check_type='NODE_STATUS_POLLING', - interval=60, - params={}, - engine_id='DEAD_ENGINE') - - registry = db_api.registry_get(self.ctx, 'FAKE_ID') - - self.assertEqual(registry.id, obj.id) - self.assertEqual(registry.cluster_id, obj.cluster_id) - self.assertEqual(registry.check_type, obj.check_type) - 
self.assertEqual(registry.interval, obj.interval)
-        self.assertEqual(registry.params, obj.params)
-        self.assertEqual(registry.engine_id, obj.engine_id)
-
-    def test_registry_get_by_engine(self):
-        obj = self._create_registry(cluster_id='FAKE_ID',
-                                    check_type='NODE_STATUS_POLLING',
-                                    interval=60,
-                                    params={},
-                                    engine_id='ENGINE')
-
-        registry = db_api.registry_get_by_param(
-            self.ctx, {"cluster_id": "FAKE_ID", "engine_id": "ENGINE"})
-
-        self.assertEqual(registry.id, obj.id)
-        self.assertEqual(registry.cluster_id, obj.cluster_id)
-        self.assertEqual(registry.check_type, obj.check_type)
-        self.assertEqual(registry.interval, obj.interval)
-        self.assertEqual(registry.params, obj.params)
-        self.assertEqual(registry.engine_id, obj.engine_id)
-
-    def test_registry_list_ids_by_service(self):
-        for index in range(10):
-            self._create_registry(
-                cluster_id='MAIN_FAKE_ID_%d' % index,
-                check_type='NODE_STATUS_POLLING',
-                interval=60,
-                params={},
-                engine_id='ENGINE0'
-            )
-        for index in range(10):
-            self._create_registry(
-                cluster_id='FAKE_ID_%d' % (index + 1),
-                check_type='NODE_STATUS_POLLING',
-                interval=60,
-                params={},
-                engine_id='ENGINE%d' % (index + 1)
-            )
-
-        registries = db_api.registry_list_ids_by_service(self.ctx, 'ENGINE0')
-        self.assertEqual(10, len(registries))
-        for registry in registries:
-            self.assertIn('MAIN_FAKE_ID', registry.cluster_id)
-
-    def test_registry_list_ids_by_service_is_empty(self):
-        self._create_registry(
-            cluster_id='FAKE_ID',
-            check_type='NODE_STATUS_POLLING',
-            interval=60,
-            params={},
-            engine_id='ENGINE'
-        )
-
-        registries = db_api.registry_list_ids_by_service(self.ctx, 'ENGINE1')
-        self.assertEqual(0, len(registries))
diff --git a/senlin/tests/unit/db/test_service_api.py b/senlin/tests/unit/db/test_service_api.py
deleted file mode 100644
index c15f12986..000000000
--- a/senlin/tests/unit/db/test_service_api.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
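[Editorial sketch.] The registry tests above reduce to one rule: a claim selects exactly those registry rows whose holding engine is dead, so a surviving engine can take over their health checks. A self-contained sketch of that selection, with hypothetical names rather than the real DB-API:

    # Illustrative claim semantics reconstructed from the tests above.
    from dataclasses import dataclass


    @dataclass
    class Registry:
        cluster_id: str
        engine_id: str


    def registry_claim(registries, is_service_dead):
        # Rows held by live engines stay put; only orphaned rows are
        # handed to the claiming engine.
        return [r for r in registries if is_service_dead(r.engine_id)]


    rows = [Registry('CLUSTER_1', 'SERVICE_ID'),
            Registry('CLUSTER_2', 'SERVICE_ID_DEAD')]
    claimed = registry_claim(rows, lambda e: e == 'SERVICE_ID_DEAD')
    assert [r.cluster_id for r in claimed] == ['CLUSTER_2']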
-import datetime - -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from senlin.db.sqlalchemy import api as db_api -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class DBAPIServiceTest(base.SenlinTestCase): - def setUp(self): - super(DBAPIServiceTest, self).setUp() - self.ctx = utils.dummy_context() - - def _create_service(self, service_id=None, **kwargs): - service_id = service_id or 'f9aff81e-bc1f-4119-941d-ad1ea7f31d19' - values = { - 'host': 'host1.devstack.org', - 'binary': 'senlin-engine', - 'topic': 'engine', - } - values.update(kwargs) - - return db_api.service_create( - service_id, host=values.get('host'), - binary=values.get('binary'), - topic=values.get('topic'), - time_now=kwargs.get('time_now') - ) - - def test_service_create_get(self): - service = self._create_service() - - ret_service = db_api.service_get(service.id) - - self.assertIsNotNone(ret_service) - self.assertEqual(service.id, ret_service.id) - self.assertEqual(service.binary, ret_service.binary) - self.assertEqual(service.host, ret_service.host) - self.assertEqual(service.topic, ret_service.topic) - self.assertEqual(service.disabled, ret_service.disabled) - self.assertEqual(service.disabled_reason, ret_service.disabled_reason) - self.assertIsNotNone(service.created_at) - self.assertIsNotNone(service.updated_at) - - def test_service_get_all(self): - for i in range(4): - service_id = uuidutils.generate_uuid() - values = {'host': 'host-%s' % i} - self._create_service(service_id, **values) - - services = db_api.service_get_all() - - self.assertEqual(4, len(services)) - - def test_service_get_all_expired(self): - for index in range(3): - dt = timeutils.utcnow() - datetime.timedelta(hours=8) - values = { - 'binary': 'senlin-health-manager', - 'host': 'host-0-%s' % index, - 'time_now': dt - } - self._create_service(uuidutils.generate_uuid(), **values) - - for index in range(3): - dt = timeutils.utcnow() - values = { - 'binary': 'senlin-health-manager', - 'host': 'host-1-%s' % index, - 'time_now': dt - } - self._create_service(uuidutils.generate_uuid(), **values) - - db_api.service_cleanup_all_expired('senlin-health-manager') - - self.assertEqual(3, len(db_api.service_get_all())) - - def test_service_update(self): - old_service = self._create_service() - self.assertIsNotNone(old_service) - old_updated_time = old_service.updated_at - values = {'host': 'host-updated'} - - new_service = db_api.service_update(old_service.id, values) - - self.assertEqual('host-updated', new_service.host) - self.assertGreater(new_service.updated_at, old_updated_time) - - def test_service_update_values_none(self): - old_service = self._create_service() - old_updated_time = old_service.updated_at - new_service = db_api.service_update(old_service.id) - self.assertGreater(new_service.updated_at, old_updated_time) - - def test_service_delete(self): - service = self._create_service() - - db_api.service_delete(service.id) - - res = db_api.service_get(service.id) - self.assertIsNone(res) diff --git a/senlin/tests/unit/db/test_sqlalchemy_types.py b/senlin/tests/unit/db/test_sqlalchemy_types.py deleted file mode 100644 index baac4653a..000000000 --- a/senlin/tests/unit/db/test_sqlalchemy_types.py +++ /dev/null @@ -1,144 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from unittest import mock - -from oslo_utils import timeutils -import pytz -from sqlalchemy.dialects.mysql import base as mysql_base -from sqlalchemy.dialects.sqlite import base as sqlite_base -from sqlalchemy import types -import testtools - -from senlin.db.sqlalchemy import types as db_types - - -class DictTest(testtools.TestCase): - - def setUp(self): - super(DictTest, self).setUp() - self.sqltype = db_types.Dict() - - def test_load_dialect_impl(self): - dialect = mysql_base.MySQLDialect() - impl = self.sqltype.load_dialect_impl(dialect) - self.assertNotEqual(types.Text, type(impl)) - dialect = sqlite_base.SQLiteDialect() - impl = self.sqltype.load_dialect_impl(dialect) - self.assertEqual(types.Text, type(impl)) - - def test_process_bind_param(self): - dialect = None - value = {'foo': 'bar'} - result = self.sqltype.process_bind_param(value, dialect) - self.assertEqual('{"foo": "bar"}', result) - - def test_process_bind_param_null(self): - dialect = None - value = None - result = self.sqltype.process_bind_param(value, dialect) - self.assertEqual('null', result) - - def test_process_result_value(self): - dialect = None - value = '{"foo": "bar"}' - result = self.sqltype.process_result_value(value, dialect) - self.assertEqual({'foo': 'bar'}, result) - - def test_process_result_value_null(self): - dialect = None - value = None - result = self.sqltype.process_result_value(value, dialect) - self.assertIsNone(result) - - -class ListTest(testtools.TestCase): - - def setUp(self): - super(ListTest, self).setUp() - self.sqltype = db_types.List() - - def test_load_dialect_impl(self): - dialect = mysql_base.MySQLDialect() - impl = self.sqltype.load_dialect_impl(dialect) - self.assertNotEqual(types.Text, type(impl)) - dialect = sqlite_base.SQLiteDialect() - impl = self.sqltype.load_dialect_impl(dialect) - self.assertEqual(types.Text, type(impl)) - - def test_process_bind_param(self): - dialect = None - value = ['foo', 'bar'] - result = self.sqltype.process_bind_param(value, dialect) - self.assertEqual('["foo", "bar"]', result) - - def test_process_bind_param_null(self): - dialect = None - value = None - result = self.sqltype.process_bind_param(value, dialect) - self.assertEqual('null', result) - - def test_process_result_value(self): - dialect = None - value = '["foo", "bar"]' - result = self.sqltype.process_result_value(value, dialect) - self.assertEqual(['foo', 'bar'], result) - - def test_process_result_value_null(self): - dialect = None - value = None - result = self.sqltype.process_result_value(value, dialect) - self.assertIsNone(result) - - -class TZAwareDateTimeTest(testtools.TestCase): - - def setUp(self): - super(TZAwareDateTimeTest, self).setUp() - self.sqltype = db_types.TZAwareDateTime() - - def test_process_bind_param(self): - dialect = mock.Mock() - dialect.name = 'nonmysql' - value = timeutils.utcnow(True) - result = self.sqltype.process_bind_param(value, dialect) - self.assertEqual(value, result) - - def test_process_bind_param_mysql(self): - dialect = mock.Mock() - dialect.name = 'mysql' - value = timeutils.utcnow(True) - expected_value = timeutils.normalize_time(value) - 
result = self.sqltype.process_bind_param(value, dialect) - self.assertEqual(expected_value, result) - - def test_process_bind_param_mysql_null(self): - dialect = mock.Mock() - dialect.name = 'mysql' - value = None - result = self.sqltype.process_bind_param(value, dialect) - self.assertIsNone(result) - - def test_process_result_value(self): - dialect = None - value = timeutils.utcnow(False) - expected_value = value.replace(tzinfo=pytz.utc) - result = self.sqltype.process_result_value(value, dialect) - self.assertEqual(expected_value, result) - - def test_process_result_value_null(self): - dialect = None - value = None - result = self.sqltype.process_result_value(value, dialect) - self.assertIsNone(result) diff --git a/senlin/tests/unit/db/test_sqlalchemy_utils.py b/senlin/tests/unit/db/test_sqlalchemy_utils.py deleted file mode 100644 index 28b550353..000000000 --- a/senlin/tests/unit/db/test_sqlalchemy_utils.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg -from oslo_utils import timeutils - -from senlin.db.sqlalchemy import utils -from senlin.tests.unit.common import base - - -class ExactFilterTest(base.SenlinTestCase): - - def setUp(self): - super(ExactFilterTest, self).setUp() - self.query = mock.Mock() - self.model = mock.Mock() - - def test_returns_same_query_for_empty_filters(self): - filters = {} - utils.exact_filter(self.query, self.model, filters) - self.assertEqual(0, self.query.call_count) - - def test_add_exact_match_clause_for_single_values(self): - filters = {'cat': 'foo'} - utils.exact_filter(self.query, self.model, filters) - - self.query.filter_by.assert_called_once_with(cat='foo') - - def test_adds_an_in_clause_for_multiple_values(self): - self.model.cat.in_.return_value = 'fake in clause' - filters = {'cat': ['foo', 'quux']} - utils.exact_filter(self.query, self.model, filters) - - self.query.filter.assert_called_once_with('fake in clause') - self.model.cat.in_.assert_called_once_with(['foo', 'quux']) - - -class SortParamTest(base.SenlinTestCase): - - def test_value_none(self): - keys, dirs = utils.get_sort_params(None) - self.assertEqual(['id'], keys) - self.assertEqual(['asc'], dirs) - - def test_value_none_with_default_key(self): - keys, dirs = utils.get_sort_params(None, 'foo') - self.assertEqual(2, len(keys)) - self.assertEqual(2, len(dirs)) - self.assertEqual(['foo', 'id'], keys) - self.assertEqual(['asc-nullsfirst', 'asc'], dirs) - - def test_value_single(self): - keys, dirs = utils.get_sort_params('foo') - self.assertEqual(2, len(keys)) - self.assertEqual(2, len(dirs)) - self.assertEqual(['foo', 'id'], keys) - self.assertEqual(['asc-nullsfirst', 'asc'], dirs) - - def test_value_multiple(self): - keys, dirs = utils.get_sort_params('foo,bar,zoo') - self.assertEqual(4, len(keys)) - self.assertEqual(4, len(dirs)) - self.assertEqual(['foo', 'bar', 'zoo', 'id'], keys) - self.assertEqual(['asc-nullsfirst', 'asc-nullsfirst', 'asc-nullsfirst', - 'asc'], dirs) - - def 
test_value_multiple_with_dirs(self): - keys, dirs = utils.get_sort_params('foo:asc,bar,zoo:desc') - self.assertEqual(4, len(keys)) - self.assertEqual(4, len(dirs)) - self.assertEqual(['foo', 'bar', 'zoo', 'id'], keys) - self.assertEqual(['asc-nullsfirst', 'asc-nullsfirst', - 'desc-nullslast', 'asc'], dirs) - - def test_value_multiple_with_dirs_and_default_key(self): - keys, dirs = utils.get_sort_params('foo:asc,bar,zoo:desc', 'notused') - self.assertEqual(4, len(keys)) - self.assertEqual(4, len(dirs)) - self.assertEqual(['foo', 'bar', 'zoo', 'id'], keys) - self.assertEqual(['asc-nullsfirst', 'asc-nullsfirst', - 'desc-nullslast', 'asc'], dirs) - - def test_value_multiple_including_id(self): - keys, dirs = utils.get_sort_params('foo,bar,id') - self.assertEqual(3, len(keys)) - self.assertEqual(3, len(dirs)) - self.assertEqual(['foo', 'bar', 'id'], keys) - self.assertEqual(['asc-nullsfirst', 'asc-nullsfirst', - 'asc-nullsfirst'], dirs) - - -class ServiceAliveTest(base.SenlinTestCase): - - def test_alive(self): - cfg.CONF.set_override('periodic_interval', 100) - service = mock.Mock(updated_at=timeutils.utcnow()) - - res = utils.is_service_dead(service) - - self.assertFalse(res) - - def test_dead(self): - cfg.CONF.set_override('periodic_interval', 0) - service = mock.Mock(updated_at=timeutils.utcnow()) - - res = utils.is_service_dead(service) - - self.assertTrue(res) diff --git a/senlin/tests/unit/drivers/__init__.py b/senlin/tests/unit/drivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/drivers/test_cinder_v2.py b/senlin/tests/unit/drivers/test_cinder_v2.py deleted file mode 100644 index 6d06df7f1..000000000 --- a/senlin/tests/unit/drivers/test_cinder_v2.py +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
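[Editorial sketch.] Taken together, the SortParamTest assertions above fully determine the sort-string grammar: comma-separated 'key[:dir]' items, ascending keys sorting NULLs first, descending keys sorting them last, and 'id' appended as an ascending tie-breaker when absent. A behavior-equivalent sketch reconstructed from those assertions, not the actual utils implementation:

    # Reconstructed from the test expectations above; illustrative only.
    def get_sort_params(value, default_key=None):
        keys, dirs = [], []
        if value:
            for item in value.split(','):
                key, _, direction = item.partition(':')
                keys.append(key)
                # Ascending keys sort NULLs first; descending, last.
                dirs.append('desc-nullslast' if direction == 'desc'
                            else 'asc-nullsfirst')
        elif default_key:
            keys.append(default_key)
            dirs.append('asc-nullsfirst')
        if 'id' not in keys:
            keys.append('id')
            dirs.append('asc')
        return keys, dirs


    keys, dirs = get_sort_params('foo:asc,bar,zoo:desc')
    assert keys == ['foo', 'bar', 'zoo', 'id']
    assert dirs == ['asc-nullsfirst', 'asc-nullsfirst',
                    'desc-nullslast', 'asc']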
- -from unittest import mock - -from senlin.drivers.os import cinder_v2 -from senlin.drivers import sdk -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestCinderV2(base.SenlinTestCase): - - def setUp(self): - super(TestCinderV2, self).setUp() - - self.ctx = utils.dummy_context() - self.conn_params = self.ctx.to_dict() - self.mock_conn = mock.Mock() - self.mock_create = self.patchobject(sdk, 'create_connection', - return_value=self.mock_conn) - self.volume = self.mock_conn.block_store - self.vo = cinder_v2.CinderClient(self.conn_params) - - def test_init(self): - self.mock_create.assert_called_once_with(self.conn_params, - service_type='block-storage') - self.assertEqual(self.mock_conn, self.vo.conn) - - def test_volume_get(self): - self.vo.volume_get('foo') - self.volume.get_volume.assert_called_once_with('foo') - - def test_volume_create(self): - self.vo.volume_create(name='foo') - self.volume.create_volume.assert_called_once_with(name='foo') - - def test_volume_delete(self): - self.vo.volume_delete('foo', True) - self.volume.delete_volume.assert_called_once_with( - 'foo', ignore_missing=True) - self.volume.delete_volume.reset_mock() - - self.vo.volume_delete('foo', False) - self.volume.delete_volume.assert_called_once_with( - 'foo', ignore_missing=False) - self.volume.delete_volume.reset_mock() - - self.vo.volume_delete('foo') - self.volume.delete_volume.assert_called_once_with( - 'foo', ignore_missing=True) - - def test_snapshot_create(self): - self.vo.snapshot_create(name='foo') - self.volume.create_snapshot.assert_called_once_with(name='foo') - - def test_snapshot_delete(self): - self.vo.snapshot_delete('foo', True) - self.volume.delete_snapshot.assert_called_once_with( - 'foo', ignore_missing=True) - self.volume.delete_snapshot.reset_mock() - - self.vo.snapshot_delete('foo', False) - self.volume.delete_snapshot.assert_called_once_with( - 'foo', ignore_missing=False) - - self.volume.delete_snapshot.reset_mock() - self.vo.snapshot_delete('foo') - self.volume.delete_snapshot.assert_called_once_with( - 'foo', ignore_missing=True) - - def test_snapshot_get(self): - self.vo.snapshot_get('foo') - self.volume.get_snapshot.assert_called_once_with('foo') - - def test_volume_type_create(self): - self.vo.volume_type_create(name='foo') - self.volume.create_type.assert_called_once_with(name='foo') - - def test_volume_type_delete(self): - self.vo.volume_type_delete('foo', True) - self.volume.delete_type.assert_called_once_with( - 'foo', ignore_missing=True) - self.volume.delete_type.reset_mock() - - self.vo.volume_type_delete('foo', False) - self.volume.delete_type.assert_called_once_with( - 'foo', ignore_missing=False) - - self.volume.delete_type.reset_mock() - self.vo.volume_type_delete('foo') - self.volume.delete_type.assert_called_once_with( - 'foo', ignore_missing=True) - - def test_volume_type_get(self): - self.vo.volume_type_get('foo') - self.volume.find_type.assert_called_once_with('foo', - ignore_missing=True) diff --git a/senlin/tests/unit/drivers/test_docker_v1.py b/senlin/tests/unit/drivers/test_docker_v1.py deleted file mode 100644 index eb414b0d3..000000000 --- a/senlin/tests/unit/drivers/test_docker_v1.py +++ /dev/null @@ -1,115 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.drivers.container import docker_v1 -from senlin.tests.unit.common import base - - -class TestDocker(base.SenlinTestCase): - - @mock.patch("docker.APIClient") - def setUp(self, mock_docker): - super(TestDocker, self).setUp() - self.x_docker = mock.Mock() - mock_docker.return_value = self.x_docker - self.sot = docker_v1.DockerClient("abc") - - @mock.patch("docker.APIClient") - def test_init(self, mock_docker): - x_docker = mock_docker.return_value - url = mock.Mock() - - sot = docker_v1.DockerClient(url) - - self.assertEqual(x_docker, sot._dockerclient) - mock_docker.assert_called_once_with(base_url=url, version='auto') - - def test_container_create(self): - image = mock.Mock() - - self.sot.container_create(image) - - self.x_docker.create_container.assert_called_once_with( - name=None, image=image, command=None) - - def test_container_delete(self): - container = mock.Mock() - - res = self.sot.container_delete(container) - - self.assertTrue(res) - self.x_docker.remove_container.assert_called_once_with(container) - - def test_restart(self): - container = mock.Mock() - - res = self.sot.restart(container) - - self.assertIsNone(res) - self.x_docker.restart.assert_called_once_with(container) - - def test_restart_with_wait(self): - container = mock.Mock() - - res = self.sot.restart(container, timeout=20) - - self.assertIsNone(res) - self.x_docker.restart.assert_called_once_with(container, timeout=20) - - def test_pause(self): - container = mock.Mock() - - res = self.sot.pause(container) - - self.assertIsNone(res) - self.x_docker.pause.assert_called_once_with(container) - - def test_unpause(self): - container = mock.Mock() - - res = self.sot.unpause(container) - - self.assertIsNone(res) - self.x_docker.unpause.assert_called_once_with(container) - - def test_start(self): - container = mock.Mock() - - res = self.sot.start(container) - - self.assertIsNone(res) - self.x_docker.start.assert_called_once_with(container) - - def test_stop(self): - container = mock.Mock() - params = {'timeout': None} - res = self.sot.stop(container, **params) - - self.assertIsNone(res) - self.x_docker.stop.assert_called_once_with(container, **params) - - def test_stop_with_wait(self): - container = mock.Mock() - params = {'timeout': 20} - res = self.sot.stop(container, **params) - - self.assertIsNone(res) - self.x_docker.stop.assert_called_once_with(container, **params) - - def test_rename(self): - container = mock.Mock() - res = self.sot.rename(container, 'new_name') - - self.assertIsNone(res) - self.x_docker.rename.assert_called_once_with(container, 'new_name') diff --git a/senlin/tests/unit/drivers/test_driver.py b/senlin/tests/unit/drivers/test_driver.py deleted file mode 100644 index 1ba20cdd0..000000000 --- a/senlin/tests/unit/drivers/test_driver.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg - -from senlin.drivers import base as driver_base -from senlin.engine import environment -from senlin.tests.unit.common import base - - -class TestSenlinDriver(base.SenlinTestCase): - - def test_init_using_default_cloud_backend(self): - plugin1 = mock.Mock() - plugin1.compute = 'Compute1' - plugin1.orchestration = 'Orchestration1' - env = environment.global_env() - env.register_driver('openstack_test', plugin1) - cfg.CONF.set_override('cloud_backend', 'openstack_test') - - sd = driver_base.SenlinDriver() - - self.assertEqual('Compute1', sd.compute) - self.assertEqual('Orchestration1', sd.orchestration) - - def test_init_using_specified_cloud_backend(self): - plugin2 = mock.Mock() - plugin2.compute = 'Compute2' - plugin2.orchestration = 'Orchestration2' - env = environment.global_env() - env.register_driver('openstack_test', plugin2) - - sd = driver_base.SenlinDriver('openstack_test') - - self.assertEqual('Compute2', sd.compute) - self.assertEqual('Orchestration2', sd.orchestration) diff --git a/senlin/tests/unit/drivers/test_glance_v2.py b/senlin/tests/unit/drivers/test_glance_v2.py deleted file mode 100644 index 50665ce31..000000000 --- a/senlin/tests/unit/drivers/test_glance_v2.py +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
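[Editorial sketch.] test_driver.py above exercises a simple dispatch contract: plugins register under a backend name in a global environment, and SenlinDriver resolves either an explicitly requested backend or the configured cloud_backend default, then exposes the plugin's service clients as attributes. A compact sketch of that registry pattern, with illustrative names rather than Senlin's environment code:

    # Hedged sketch of driver-backend dispatch.
    _DRIVERS = {}


    def register_driver(name, plugin):
        _DRIVERS[name] = plugin


    class SenlinDriverSketch:
        def __init__(self, backend_name=None, default='openstack'):
            plugin = _DRIVERS[backend_name or default]
            # Surface the plugin's per-service clients on the facade.
            self.compute = plugin.compute
            self.orchestration = plugin.orchestration


    class _FakePlugin:
        compute = 'Compute1'
        orchestration = 'Orchestration1'


    register_driver('openstack_test', _FakePlugin)
    assert SenlinDriverSketch('openstack_test').compute == 'Compute1'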
- -from unittest import mock - -from senlin.drivers.os import glance_v2 -from senlin.drivers import sdk -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(sdk, 'create_connection') -class TestGlanceV2(base.SenlinTestCase): - - def setUp(self): - super(TestGlanceV2, self).setUp() - - self.ctx = utils.dummy_context() - self.conn_params = self.ctx.to_dict() - self.fake_conn = mock.Mock() - self.image = self.fake_conn.image - - def test_init(self, mock_create): - mock_create.return_value = self.fake_conn - - gc = glance_v2.GlanceClient(self.conn_params) - - self.assertEqual(self.fake_conn, gc.conn) - mock_create.assert_called_once_with(self.conn_params, - service_type='image') - - def test_image_find(self, mock_create): - mock_create.return_value = self.fake_conn - gc = glance_v2.GlanceClient(self.conn_params) - - res = gc.image_find('foo') - - expected = self.image.find_image.return_value - self.assertEqual(expected, res) - self.image.find_image.assert_called_once_with('foo', True) - - def test_image_find_ignore_missing(self, mock_create): - mock_create.return_value = self.fake_conn - gc = glance_v2.GlanceClient(self.conn_params) - - res = gc.image_find('foo', ignore_missing=False) - - expected = self.image.find_image.return_value - self.assertEqual(expected, res) - self.image.find_image.assert_called_once_with('foo', False) - - def test_image_get(self, mock_create): - mock_create.return_value = self.fake_conn - gc = glance_v2.GlanceClient(self.conn_params) - - res = gc.image_get('foo') - - expected = self.image.get_image.return_value - self.assertEqual(expected, res) - self.image.get_image.assert_called_once_with('foo') - - def test_image_delete(self, mock_create): - mock_create.return_value = self.fake_conn - gc = glance_v2.GlanceClient(self.conn_params) - gc.image_delete('foo') - self.image.delete_image.assert_called_once_with('foo', False) - self.image.delete_image.reset_mock() - - gc.image_delete('foo', True) - self.image.delete_image.assert_called_once_with('foo', True) - self.image.delete_image.reset_mock() - - gc.image_delete('foo', False) - self.image.delete_image.assert_called_once_with('foo', False) diff --git a/senlin/tests/unit/drivers/test_heat_v1.py b/senlin/tests/unit/drivers/test_heat_v1.py deleted file mode 100644 index 0bfca4ed7..000000000 --- a/senlin/tests/unit/drivers/test_heat_v1.py +++ /dev/null @@ -1,137 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
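[Editorial sketch.] Every Glance assertion above checks the same thin-wrapper shape: each client method delegates straight to the SDK's image proxy, and the only behavioral choice is the ignore_missing default (True for find, False for delete). A hypothetical stand-in showing that shape, not the real driver:

    # Illustrative delegation pattern; `conn` is an openstacksdk-style
    # connection object supplied by the caller.
    class GlanceClientSketch:
        def __init__(self, conn):
            self.conn = conn

        def image_find(self, name_or_id, ignore_missing=True):
            return self.conn.image.find_image(name_or_id, ignore_missing)

        def image_get(self, image_id):
            return self.conn.image.get_image(image_id)

        def image_delete(self, image_id, ignore_missing=False):
            return self.conn.image.delete_image(image_id, ignore_missing)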
-
-from unittest import mock
-
-from oslo_config import cfg
-
-from senlin.drivers.os import heat_v1
-from senlin.drivers import sdk
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-class TestHeatV1(base.SenlinTestCase):
-
-    def setUp(self):
-        super(TestHeatV1, self).setUp()
-
-        self.context = utils.dummy_context()
-        self.conn_params = self.context.to_dict()
-        self.mock_conn = mock.Mock()
-        self.mock_create = self.patchobject(sdk, 'create_connection',
-                                            return_value=self.mock_conn)
-        self.orch = self.mock_conn.orchestration
-        self.hc = heat_v1.HeatClient(self.conn_params)
-
-    def test_init(self):
-        self.mock_create.assert_called_once_with(self.conn_params,
-                                                 service_type='orchestration')
-        self.assertEqual(self.mock_conn, self.hc.conn)
-
-    def test_stack_create(self):
-        fake_params = {
-            'disable_rollback': True,
-            'stack_name': 'fake_stack',
-        }
-        self.hc.stack_create(**fake_params)
-        self.orch.create_stack.assert_called_once_with(**fake_params)
-
-    def test_stack_get(self):
-        self.hc.stack_get('stack_id')
-        self.orch.get_stack.assert_called_once_with('stack_id')
-
-    def test_stack_find(self):
-        self.hc.stack_find('name_or_id')
-        self.orch.find_stack.assert_called_once_with('name_or_id')
-
-    def test_stack_list(self):
-        self.hc.stack_list()
-        self.orch.stacks.assert_called_once_with()
-
-    def test_stack_update(self):
-        fake_params = {
-            "name": "new_name",
-        }
-        self.hc.stack_update('stack_id', **fake_params)
-        self.orch.update_stack.assert_called_once_with('stack_id',
-                                                       **fake_params)
-
-    def test_stack_delete(self):
-        self.hc.stack_delete('stack_id', ignore_missing=True)
-        self.orch.delete_stack.assert_called_once_with('stack_id', True)
-
-    def test_stack_check(self):
-        self.hc.stack_check('stack_id')
-        self.orch.check_stack.assert_called_once_with('stack_id')
-
-    def test_stack_get_environment(self):
-        self.hc.stack_get_environment('stack_id')
-        self.orch.get_stack_environment.assert_called_once_with('stack_id')
-
-    def test_stack_get_files(self):
-        self.hc.stack_get_files('stack_id')
-        self.orch.get_stack_files.assert_called_once_with('stack_id')
-
-    def test_stack_get_template(self):
-        self.hc.stack_get_template('stack_id')
-        self.orch.get_stack_template.assert_called_once_with('stack_id')
-
-    def test_wait_for_stack(self):
-        self.hc.wait_for_stack('FAKE_ID', 'STATUS', [], 100, 200)
-        self.orch.find_stack.assert_called_once_with('FAKE_ID', False)
-        stk = self.orch.find_stack.return_value
-        self.orch.wait_for_status.assert_called_once_with(
-            stk, 'STATUS', [], 100, 200)
-
-    def test_wait_for_stack_failures_not_specified(self):
-        self.hc.wait_for_stack('FAKE_ID', 'STATUS', None, 100, 200)
-        self.orch.find_stack.assert_called_once_with('FAKE_ID', False)
-        stk = self.orch.find_stack.return_value
-        self.orch.wait_for_status.assert_called_once_with(
-            stk, 'STATUS', [], 100, 200)
-
-    def test_wait_for_stack_default_timeout(self):
-        cfg.CONF.set_override('default_action_timeout', 361)
-
-        self.hc.wait_for_stack('FAKE_ID', 'STATUS', None, 100, None)
-        self.orch.find_stack.assert_called_once_with('FAKE_ID', False)
-        stk = self.orch.find_stack.return_value
-        self.orch.wait_for_status.assert_called_once_with(
-            stk, 'STATUS', [], 100, 361)
-
-    def test_wait_for_stack_delete_successful(self):
-        fake_stack = mock.Mock(id='stack_id')
-        self.orch.find_stack.return_value = fake_stack
-        self.hc.wait_for_stack_delete('stack_id')
-        self.orch.find_stack.assert_called_once_with('stack_id', True)
-        self.orch.wait_for_delete.assert_called_once_with(fake_stack,
-                                                          wait=3600)
-
-    def test_wait_for_stack_not_found(self):
-        self.orch.find_stack.return_value = None
-        self.hc.wait_for_stack('FAKE_ID', 'STATUS', [], 100, 200)
-        self.assertEqual(0, self.orch.wait_for_status.call_count)
-
-    def test_wait_for_stack_delete_with_resource_not_found(self):
-        self.orch.find_stack.return_value = None
-        self.hc.wait_for_stack_delete('stack_id')
-        self.orch.find_stack.assert_called_once_with('stack_id', True)
-
-    def test_wait_for_stack_delete_with_timeout(self):
-        cfg.CONF.set_override('default_action_timeout', 360)
-        fake_stack = mock.Mock(id='stack_id')
-        self.orch.find_stack.return_value = fake_stack
-
-        self.hc.wait_for_stack_delete('stack_id')
-        self.orch.wait_for_delete.assert_called_once_with(fake_stack,
-                                                          wait=360)
diff --git a/senlin/tests/unit/drivers/test_keystone_v3.py b/senlin/tests/unit/drivers/test_keystone_v3.py
deleted file mode 100644
index 7ecd5d9ad..000000000
--- a/senlin/tests/unit/drivers/test_keystone_v3.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-from unittest import mock
-
-from oslo_config import cfg
-
-from senlin.drivers.os import keystone_v3 as kv3
-from senlin.drivers import sdk
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-@mock.patch.object(sdk, 'create_connection')
-class TestKeystoneV3(base.SenlinTestCase):
-
-    def setUp(self):
-        super(TestKeystoneV3, self).setUp()
-
-        self.ctx = utils.dummy_context()
-        self.conn = mock.Mock()
-
-    def test_init(self, mock_create):
-        mock_create.return_value = self.conn
-        kc = kv3.KeystoneClient({'k': 'v'})
-
-        mock_create.assert_called_once_with({'k': 'v'})
-        self.assertEqual(self.conn, kc.conn)
-        self.assertEqual(self.conn.session, kc.session)
-
-    def test_trust_get_by_trustor(self, mock_create):
-        trust1 = mock.Mock()
-        trust1.trustee_user_id = 'USER_A_ID'
-        trust1.project_id = 'PROJECT_ID_1'
-
-        trust2 = mock.Mock()
-        trust2.trustee_user_id = 'USER_B_ID'
-        trust2.project_id = 'PROJECT_ID_1'
-
-        trust3 = mock.Mock()
-        trust3.trustee_user_id = 'USER_A_ID'
-        trust3.project_id = 'PROJECT_ID_2'
-
-        self.conn.identity.trusts.return_value = [trust1, trust2, trust3]
-        mock_create.return_value = self.conn
-        kc = kv3.KeystoneClient({'k': 'v'})
-
-        # no trustee/project filter, matching 1st
-        res = kc.trust_get_by_trustor('USER_A')
-        self.assertEqual(trust1, res)
-
-        # trustee specified, matching 2nd
-        res = kc.trust_get_by_trustor('USER_A', 'USER_B_ID')
-        self.assertEqual(trust2, res)
-
-        # project specified, matching 3rd
-        res = kc.trust_get_by_trustor('USER_A', project='PROJECT_ID_2')
-        self.assertEqual(trust3, res)
-
-        # both trustee and project specified, matching 3rd
-        res = kc.trust_get_by_trustor('USER_A', 'USER_A_ID', 'PROJECT_ID_2')
-        self.assertEqual(trust3, res)
-
-        # No matching record found
-        res = kc.trust_get_by_trustor('USER_A', 'USER_C_ID')
-        self.assertIsNone(res)
-
-        get_calls = [mock.call(trustor_user_id='USER_A')]
-
self.conn.identity.trusts.assert_has_calls(get_calls * 5) - - def test_trust_create(self, mock_create): - self.conn.identity.create_trust.return_value = 'new_trust' - mock_create.return_value = self.conn - kc = kv3.KeystoneClient({'k': 'v'}) - - # default - res = kc.trust_create('ID_JOHN', 'ID_DOE', 'PROJECT_ID') - - self.assertEqual('new_trust', res) - self.conn.identity.create_trust.assert_called_once_with( - trustor_user_id='ID_JOHN', trustee_user_id='ID_DOE', - project_id='PROJECT_ID', impersonation=True, - allow_redelegation=True, roles=[]) - self.conn.reset_mock() - - # with roles - res = kc.trust_create('ID_JOHN', 'ID_DOE', 'PROJECT_ID', - ['r1', 'r2']) - - self.assertEqual('new_trust', res) - self.conn.identity.create_trust.assert_called_once_with( - trustor_user_id='ID_JOHN', trustee_user_id='ID_DOE', - project_id='PROJECT_ID', impersonation=True, - allow_redelegation=True, - roles=[{'name': 'r1'}, {'name': 'r2'}]) - self.conn.reset_mock() - - # impersonation - res = kc.trust_create('ID_JOHN', 'ID_DOE', 'PROJECT_ID', - impersonation=False) - - self.assertEqual('new_trust', res) - self.conn.identity.create_trust.assert_called_once_with( - trustor_user_id='ID_JOHN', trustee_user_id='ID_DOE', - project_id='PROJECT_ID', impersonation=False, - allow_redelegation=True, roles=[]) - self.conn.reset_mock() - - def test_trust_create_conf_roles(self, mock_create): - cfg.CONF.set_override('trust_roles', ['r1', 'r2']) - self.conn.identity.create_trust.return_value = 'new_trust' - mock_create.return_value = self.conn - kc = kv3.KeystoneClient({'k': 'v'}) - - res = kc.trust_create('ID_JOHN', 'ID_DOE', 'PROJECT_ID', [ - 'r1', 'r2', 'r3']) - - self.assertEqual('new_trust', res) - self.conn.identity.create_trust.assert_called_once_with( - trustor_user_id='ID_JOHN', trustee_user_id='ID_DOE', - project_id='PROJECT_ID', impersonation=True, - allow_redelegation=True, roles=[{'name': 'r1'}, {'name': 'r2'}]) - self.conn.reset_mock() - - cfg.CONF.set_override('trust_roles', []) - res = kc.trust_create('ID_JOHN', 'ID_DOE', 'PROJECT_ID', - ['r1', 'r2']) - - self.assertEqual('new_trust', res) - self.conn.identity.create_trust.assert_called_once_with( - trustor_user_id='ID_JOHN', trustee_user_id='ID_DOE', - project_id='PROJECT_ID', impersonation=True, - allow_redelegation=True, - roles=[{'name': 'r1'}, {'name': 'r2'}]) - self.conn.reset_mock() - - # impersonation - res = kc.trust_create('ID_JOHN', 'ID_DOE', 'PROJECT_ID', - impersonation=False) - - self.assertEqual('new_trust', res) - self.conn.identity.create_trust.assert_called_once_with( - trustor_user_id='ID_JOHN', trustee_user_id='ID_DOE', - project_id='PROJECT_ID', impersonation=False, - allow_redelegation=True, roles=[]) - self.conn.reset_mock() - - @mock.patch.object(sdk, 'authenticate') - def test_get_token(self, mock_auth, mock_create): - access_info = {'token': '123', 'user_id': 'abc', 'project_id': 'xyz'} - mock_auth.return_value = access_info - - token = kv3.KeystoneClient.get_token(key='value') - - mock_auth.assert_called_once_with(key='value') - self.assertEqual('123', token) - - @mock.patch.object(sdk, 'authenticate') - def test_get_user_id(self, mock_auth, mock_create): - access_info = {'token': '123', 'user_id': 'abc', 'project_id': 'xyz'} - mock_auth.return_value = access_info - - user_id = kv3.KeystoneClient.get_user_id(key='value') - - mock_auth.assert_called_once_with(key='value') - self.assertEqual('abc', user_id) - - def test_get_service_credentials_with_tls(self, mock_create): - cfg.CONF.set_override('auth_url', 'FAKE_URL', 
group='authentication') - cfg.CONF.set_override('service_username', 'FAKE_USERNAME', - group='authentication') - cfg.CONF.set_override('service_password', 'FAKE_PASSWORD', - group='authentication') - cfg.CONF.set_override('service_project_name', 'FAKE_PROJECT', - group='authentication') - cfg.CONF.set_override('service_user_domain', 'FAKE_DOMAIN_1', - group='authentication') - cfg.CONF.set_override('service_project_domain', 'FAKE_DOMAIN_2', - group='authentication') - cfg.CONF.set_override('interface', 'internal', - group='authentication') - cfg.CONF.set_override('cafile', '/fake/capath', - group='authentication') - cfg.CONF.set_override('certfile', '/fake/certpath', - group='authentication') - cfg.CONF.set_override('keyfile', '/fake/keypath', - group='authentication') - expected = { - 'auth_url': 'FAKE_URL', - 'username': 'FAKE_USERNAME', - 'password': 'FAKE_PASSWORD', - 'project_name': 'FAKE_PROJECT', - 'user_domain_name': 'FAKE_DOMAIN_1', - 'project_domain_name': 'FAKE_DOMAIN_2', - 'interface': 'internal', - 'cert': '/fake/certpath', - 'key': '/fake/keypath', - 'cacert': '/fake/capath', - 'verify': True - } - actual = kv3.KeystoneClient.get_service_credentials() - - self.assertEqual(expected, actual) - - new_expected = copy.copy(expected) - new_expected['key1'] = 'value1' - new_expected['password'] = 'NEW_PASSWORD' - - actual = kv3.KeystoneClient.get_service_credentials( - key1='value1', password='NEW_PASSWORD') - - self.assertEqual(new_expected, actual) - - def test_get_service_credentials(self, mock_create): - cfg.CONF.set_override('auth_url', 'FAKE_URL', group='authentication') - cfg.CONF.set_override('service_username', 'FAKE_USERNAME', - group='authentication') - cfg.CONF.set_override('service_password', 'FAKE_PASSWORD', - group='authentication') - cfg.CONF.set_override('service_project_name', 'FAKE_PROJECT', - group='authentication') - cfg.CONF.set_override('service_user_domain', 'FAKE_DOMAIN_1', - group='authentication') - cfg.CONF.set_override('service_project_domain', 'FAKE_DOMAIN_2', - group='authentication') - cfg.CONF.set_override('verify_ssl', False, - group='authentication') - cfg.CONF.set_override('interface', 'internal', - group='authentication') - - expected = { - 'auth_url': 'FAKE_URL', - 'username': 'FAKE_USERNAME', - 'password': 'FAKE_PASSWORD', - 'project_name': 'FAKE_PROJECT', - 'user_domain_name': 'FAKE_DOMAIN_1', - 'project_domain_name': 'FAKE_DOMAIN_2', - 'verify': False, - 'interface': 'internal', - } - - actual = kv3.KeystoneClient.get_service_credentials() - - self.assertEqual(expected, actual) - - new_expected = copy.copy(expected) - new_expected['key1'] = 'value1' - new_expected['password'] = 'NEW_PASSWORD' - - actual = kv3.KeystoneClient.get_service_credentials( - key1='value1', password='NEW_PASSWORD') - - self.assertEqual(new_expected, actual) - - def test_validate_regions(self, mock_create): - self.conn.identity.regions.return_value = [ - {'id': 'R1', 'parent_region_id': None}, - {'id': 'R2', 'parent_region_id': None}, - {'id': 'R3', 'parent_region_id': 'R1'}, - ] - mock_create.return_value = self.conn - - kc = kv3.KeystoneClient({'k': 'v'}) - - res = kc.validate_regions(['R1', 'R4']) - - self.assertIn('R1', res) - self.assertNotIn('R4', res) - - res = kc.validate_regions([]) - self.assertEqual([], res) - - def test_get_senlin_endpoint(self, mock_create): - cfg.CONF.set_override('default_region_name', 'RegionN') - self.conn.session.get_endpoint.return_value = 'http://web.com:1234/v1' - mock_create.return_value = self.conn - kc = kv3.KeystoneClient({'k': 
'v'}) - - res = kc.get_senlin_endpoint() - - self.assertEqual('http://web.com:1234/v1', res) - self.conn.session.get_endpoint.assert_called_once_with( - service_type='clustering', interface='public', - region_name='RegionN') diff --git a/senlin/tests/unit/drivers/test_lbaas.py b/senlin/tests/unit/drivers/test_lbaas.py deleted file mode 100644 index 83cc4b357..000000000 --- a/senlin/tests/unit/drivers/test_lbaas.py +++ /dev/null @@ -1,871 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import eventlet -from unittest import mock - -from oslo_context import context as oslo_context - -from senlin.common import exception -from senlin.common.i18n import _ -from senlin.drivers.os import lbaas -from senlin.drivers.os import neutron_v2 -from senlin.drivers.os import octavia_v2 -from senlin.engine import node as nodem -from senlin.profiles import base as pb -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestOctaviaLBaaSDriver(base.SenlinTestCase): - - def setUp(self): - super(TestOctaviaLBaaSDriver, self).setUp() - self.context = utils.dummy_context() - self.conn_params = self.context.to_dict() - self.lb_driver = lbaas.LoadBalancerDriver(self.conn_params) - self.lb_driver.lb_status_timeout = 10 - self.patchobject(neutron_v2, 'NeutronClient') - self.patchobject(octavia_v2, 'OctaviaClient') - self.nc = self.lb_driver.nc() - self.oc = self.lb_driver.oc() - - self.vip = { - 'subnet': 'subnet-01', - 'address': '192.168.1.100', - 'admin_state_up': True, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'connection_limit': 50 - } - self.pool = { - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'session_persistence': { - 'type': 'SOURCE_IP', - 'cookie_name': 'whatever', - }, - 'admin_state_up': True, - } - self.hm = { - "type": "HTTP", - "delay": "1", - "timeout": 1, - "max_retries": 5, - "pool_id": "POOL_ID", - "admin_state_up": True, - "http_method": "GET", - "url_path": "/index.html", - "expected_codes": "200,201,202" - } - self.availability_zone = 'my_fake_az' - self.flavor_id = 'my_fake_flavor_id' - - def test_init(self): - conn_params = self.context.to_dict() - conn_params['lb_status_timeout'] = 10 - - res = lbaas.LoadBalancerDriver(conn_params) - self.assertEqual(conn_params, res.conn_params) - self.assertIsNone(res._nc) - - @mock.patch.object(neutron_v2, 'NeutronClient') - def test_nc_initialize(self, mock_neutron_client): - conn_params = self.context.to_dict() - conn_params['lb_status_timeout'] = 10 - - fake_nc = mock.Mock() - mock_neutron_client.return_value = fake_nc - lb_driver = lbaas.LoadBalancerDriver(conn_params) - self.assertIsNone(lb_driver._nc) - - # Create a new NeutronClient - res = lb_driver.nc() - mock_neutron_client.assert_called_once_with(conn_params) - self.assertEqual(fake_nc, res) - - # Use the existing NeutronClient stored in self._nc - fake_nc_new = mock.Mock() - mock_neutron_client.return_value = fake_nc_new - res1 = lb_driver.nc() - mock_neutron_client.assert_called_once_with(conn_params) - 
self.assertNotEqual(fake_nc_new, res1) - self.assertEqual(res, res1) - - def test_wait_for_lb_ready(self): - lb_id = 'ID1' - lb_obj = mock.Mock() - lb_obj.id = lb_id - lb_obj.provisioning_status = 'ACTIVE' - lb_obj.operating_status = 'ONLINE' - self.oc.loadbalancer_get.return_value = lb_obj - - res = self.lb_driver._wait_for_lb_ready(lb_id) - self.assertTrue(res) - - def test_wait_for_lb_ready_ignore_not_found(self): - lb_id = 'LB_ID' - self.oc.loadbalancer_get.return_value = None - - res = self.lb_driver._wait_for_lb_ready(lb_id, ignore_not_found=True) - - self.assertTrue(res) - - @mock.patch.object(eventlet, 'sleep') - def test_wait_for_lb_ready_timeout(self, mock_sleep): - lb_id = 'LB_ID' - lb_obj = mock.Mock(id=lb_id) - self.oc.loadbalancer_get.return_value = lb_obj - lb_obj.provisioning_status = 'PENDING_UPDATE' - lb_obj.operating_status = 'OFFLINE' - - res = self.lb_driver._wait_for_lb_ready(lb_id) - - self.assertFalse(res) - mock_sleep.assert_called_once_with(10) - - def test_lb_create_succeeded_session_persistence_none(self): - lb_obj = mock.Mock() - listener_obj = mock.Mock() - pool_obj = mock.Mock() - hm_obj = mock.Mock() - lb_obj.id = 'LB_ID' - lb_obj.vip_address = '192.168.1.100' - listener_obj.id = 'LISTENER_ID' - pool_obj.id = 'POOL_ID' - subnet_obj = mock.Mock() - subnet_obj.name = 'subnet' - subnet_obj.id = 'SUBNET_ID' - subnet_obj.network_id = 'NETWORK_ID' - hm_obj.id = 'HEALTHMONITOR_ID' - cluster_name = 'test_cluster' - - self.oc.loadbalancer_create.return_value = lb_obj - self.oc.listener_create.return_value = listener_obj - self.oc.pool_create.return_value = pool_obj - self.oc.healthmonitor_create.return_value = hm_obj - self.nc.subnet_get.return_value = subnet_obj - - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - temp_pool = copy.deepcopy(self.pool) - temp_pool['session_persistence'] = {} - status, res = self.lb_driver.lb_create(self.vip, temp_pool, - cluster_name, self.hm, - self.availability_zone, - self.flavor_id) - - self.assertTrue(status) - lb_name = 'senlin-lb-%s' % cluster_name - self.oc.loadbalancer_create.assert_called_once_with( - 'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'], - name=lb_name, availability_zone=self.availability_zone, - flavor_id=self.flavor_id) - self.assertEqual('LB_ID', res['loadbalancer']) - self.assertEqual('192.168.1.100', res['vip_address']) - listener_name = 'senlin-listener-%s' % cluster_name - self.oc.listener_create.assert_called_once_with( - 'LB_ID', self.vip['protocol'], self.vip['protocol_port'], - self.vip['connection_limit'], self.vip['admin_state_up'], - name=listener_name) - self.assertEqual('LISTENER_ID', res['listener']) - pool_name = 'senlin-pool-%s' % cluster_name - self.oc.pool_create.assert_called_once_with( - temp_pool['lb_method'], 'LISTENER_ID', temp_pool['protocol'], - temp_pool['session_persistence'], temp_pool['admin_state_up'], - name=pool_name) - self.assertEqual('POOL_ID', res['pool']) - self.oc.healthmonitor_create.assert_called_once_with( - self.hm['type'], self.hm['delay'], self.hm['timeout'], - self.hm['max_retries'], 'POOL_ID', self.hm['admin_state_up'], - self.hm['http_method'], self.hm['url_path'], - self.hm['expected_codes']) - self.assertEqual('HEALTHMONITOR_ID', res['healthmonitor']) - self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID') - calls = [mock.call('LB_ID') for i in range(1, 5)] - self.lb_driver._wait_for_lb_ready.assert_has_calls( - calls, any_order=False) - - def 
test_lb_create_succeeded_subnet(self): - lb_obj = mock.Mock() - listener_obj = mock.Mock() - pool_obj = mock.Mock() - hm_obj = mock.Mock() - lb_obj.id = 'LB_ID' - lb_obj.vip_address = '192.168.1.100' - listener_obj.id = 'LISTENER_ID' - pool_obj.id = 'POOL_ID' - subnet_obj = mock.Mock() - subnet_obj.name = 'subnet' - subnet_obj.id = 'SUBNET_ID' - subnet_obj.network_id = 'NETWORK_ID' - hm_obj.id = 'HEALTHMONITOR_ID' - cluster_name = 'test_cluster' - - self.oc.loadbalancer_create.return_value = lb_obj - self.oc.listener_create.return_value = listener_obj - self.oc.pool_create.return_value = pool_obj - self.oc.healthmonitor_create.return_value = hm_obj - self.nc.subnet_get.return_value = subnet_obj - - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - status, res = self.lb_driver.lb_create(self.vip, self.pool, - cluster_name, self.hm, - self.availability_zone, - self.flavor_id) - - self.assertTrue(status) - lb_name = 'senlin-lb-%s' % cluster_name - self.oc.loadbalancer_create.assert_called_once_with( - 'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'], - name=lb_name, availability_zone=self.availability_zone, - flavor_id=self.flavor_id) - self.assertEqual('LB_ID', res['loadbalancer']) - self.assertEqual('192.168.1.100', res['vip_address']) - listener_name = 'senlin-listener-%s' % cluster_name - self.oc.listener_create.assert_called_once_with( - 'LB_ID', self.vip['protocol'], self.vip['protocol_port'], - self.vip['connection_limit'], self.vip['admin_state_up'], - name=listener_name) - self.assertEqual('LISTENER_ID', res['listener']) - pool_name = 'senlin-pool-%s' % cluster_name - self.oc.pool_create.assert_called_once_with( - self.pool['lb_method'], 'LISTENER_ID', self.pool['protocol'], - self.pool['session_persistence'], self.pool['admin_state_up'], - name=pool_name) - self.assertEqual('POOL_ID', res['pool']) - self.oc.healthmonitor_create.assert_called_once_with( - self.hm['type'], self.hm['delay'], self.hm['timeout'], - self.hm['max_retries'], 'POOL_ID', self.hm['admin_state_up'], - self.hm['http_method'], self.hm['url_path'], - self.hm['expected_codes']) - self.assertEqual('HEALTHMONITOR_ID', res['healthmonitor']) - self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID') - calls = [mock.call('LB_ID') for i in range(1, 5)] - self.lb_driver._wait_for_lb_ready.assert_has_calls( - calls, any_order=False) - - def test_lb_create_succeeded_network(self): - vip = { - 'network': 'network-01', - 'address': '192.168.1.100', - 'admin_state_up': True, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'connection_limit': 50 - } - lb_obj = mock.Mock() - listener_obj = mock.Mock() - pool_obj = mock.Mock() - hm_obj = mock.Mock() - lb_obj.id = 'LB_ID' - lb_obj.vip_address = '192.168.1.100' - listener_obj.id = 'LISTENER_ID' - pool_obj.id = 'POOL_ID' - network_obj = mock.Mock() - network_obj.name = 'network' - network_obj.id = 'NETWORK_ID' - hm_obj.id = 'HEALTHMONITOR_ID' - cluster_name = 'test_cluster' - - self.oc.loadbalancer_create.return_value = lb_obj - self.oc.listener_create.return_value = listener_obj - self.oc.pool_create.return_value = pool_obj - self.oc.healthmonitor_create.return_value = hm_obj - self.nc.network_get.return_value = network_obj - - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - status, res = self.lb_driver.lb_create(vip, self.pool, - cluster_name, self.hm, - self.availability_zone) - - self.assertTrue(status) - lb_name = 'senlin-lb-%s' % cluster_name 
- self.oc.loadbalancer_create.assert_called_once_with( - None, 'NETWORK_ID', vip['address'], vip['admin_state_up'], - name=lb_name, availability_zone=self.availability_zone, - flavor_id=None) - self.assertEqual('LB_ID', res['loadbalancer']) - self.assertEqual('192.168.1.100', res['vip_address']) - listener_name = 'senlin-listener-%s' % cluster_name - self.oc.listener_create.assert_called_once_with( - 'LB_ID', vip['protocol'], vip['protocol_port'], - vip['connection_limit'], vip['admin_state_up'], - name=listener_name) - self.assertEqual('LISTENER_ID', res['listener']) - pool_name = 'senlin-pool-%s' % cluster_name - self.oc.pool_create.assert_called_once_with( - self.pool['lb_method'], 'LISTENER_ID', self.pool['protocol'], - self.pool['session_persistence'], self.pool['admin_state_up'], - name=pool_name) - self.assertEqual('POOL_ID', res['pool']) - self.oc.healthmonitor_create.assert_called_once_with( - self.hm['type'], self.hm['delay'], self.hm['timeout'], - self.hm['max_retries'], 'POOL_ID', self.hm['admin_state_up'], - self.hm['http_method'], self.hm['url_path'], - self.hm['expected_codes']) - self.assertEqual('HEALTHMONITOR_ID', res['healthmonitor']) - self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID') - calls = [mock.call('LB_ID') for i in range(1, 5)] - self.lb_driver._wait_for_lb_ready.assert_has_calls( - calls, any_order=False) - - def test_lb_create_loadbalancer_creation_failed(self): - lb_obj = mock.Mock() - lb_obj.id = 'LB_ID' - subnet_obj = mock.Mock() - subnet_obj.name = 'subnet' - subnet_obj.id = 'SUBNET_ID' - subnet_obj.network_id = 'NETWORK_ID' - cluster_name = 'test_cluster' - self.oc.loadbalancer_create.return_value = lb_obj - self.nc.subnet_get.return_value = subnet_obj - - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [False] - self.lb_driver.lb_delete = mock.Mock() - - status, res = self.lb_driver.lb_create(self.vip, self.pool, - cluster_name, self.hm) - self.assertFalse(status) - msg = _('Failed in creating loadbalancer (%s).') % 'LB_ID' - self.assertEqual(msg, res) - lb_name = 'senlin-lb-%s' % cluster_name - self.oc.loadbalancer_create.assert_called_once_with( - 'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'], - availability_zone=None, name=lb_name, flavor_id=None) - self.lb_driver._wait_for_lb_ready.assert_called_once_with('LB_ID') - self.lb_driver.lb_delete.assert_called_once_with( - loadbalancer='LB_ID') - - # Exception happens in subnet_get. - self.nc.subnet_get.side_effect = exception.InternalError( - code=500, message='GET FAILED') - status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm) - self.assertFalse(status) - msg = _('Failed in getting subnet: GET FAILED.') - self.assertEqual(msg, res) - - # Exception happens in loadbalancer_create. 
- self.nc.subnet_get.side_effect = None - self.oc.loadbalancer_create.side_effect = exception.InternalError( - code=500, message='CREATE FAILED') - status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm) - self.assertFalse(status) - msg = _('Failed in creating loadbalancer: CREATE FAILED.') - self.assertEqual(msg, res) - - @mock.patch.object(eventlet, 'sleep') - def test_lb_create_listener_creation_failed(self, mock_sleep): - lb_obj = mock.Mock() - listener_obj = mock.Mock() - lb_obj.id = 'LB_ID' - listener_obj.id = 'LISTENER_ID' - subnet_obj = mock.Mock() - subnet_obj.name = 'subnet' - subnet_obj.id = 'SUBNET_ID' - subnet_obj.network_id = 'NETWORK_ID' - cluster_name = 'test_cluster' - - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [True, False] - self.oc.loadbalancer_create.return_value = lb_obj - self.oc.listener_create.return_value = listener_obj - self.nc.subnet_get.return_value = subnet_obj - self.lb_driver.lb_delete = mock.Mock() - - status, res = self.lb_driver.lb_create(self.vip, self.pool, - cluster_name, self.hm) - self.assertFalse(status) - msg = _('Failed in creating listener (%s).') % 'LISTENER_ID' - self.assertEqual(msg, res) - lb_name = 'senlin-lb-%s' % cluster_name - self.oc.loadbalancer_create.assert_called_once_with( - 'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'], - availability_zone=None, name=lb_name, flavor_id=None) - listener_name = 'senlin-listener-%s' % cluster_name - self.oc.listener_create.assert_called_once_with( - 'LB_ID', self.vip['protocol'], self.vip['protocol_port'], - self.vip['connection_limit'], self.vip['admin_state_up'], - name=listener_name) - self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID') - self.lb_driver.lb_delete.assert_called_once_with( - loadbalancer='LB_ID', listener='LISTENER_ID') - - # Exception happens in listen_create - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [True, False] - self.oc.listener_create.side_effect = exception.InternalError( - code=500, message='CREATE FAILED') - status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm) - self.assertFalse(status) - msg = _('Failed in creating lb listener: CREATE FAILED.') - self.assertEqual(msg, res) - - def test_lb_create_pool_creation_failed(self): - lb_obj = mock.Mock() - listener_obj = mock.Mock() - pool_obj = mock.Mock() - lb_obj.id = 'LB_ID' - lb_obj.vip_address = '192.169.1.100' - listener_obj.id = 'LISTENER_ID' - pool_obj.id = 'POOL_ID' - subnet_obj = mock.Mock() - subnet_obj.name = 'subnet' - subnet_obj.id = 'SUBNET_ID' - subnet_obj.network_id = 'NETWORK_ID' - cluster_name = 'test_cluster' - - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [True, True, False] - self.oc.loadbalancer_create.return_value = lb_obj - self.oc.listener_create.return_value = listener_obj - self.oc.pool_create.return_value = pool_obj - self.nc.subnet_get.return_value = subnet_obj - self.lb_driver.lb_delete = mock.Mock() - - status, res = self.lb_driver.lb_create(self.vip, self.pool, - cluster_name, self.hm) - self.assertFalse(status) - msg = _('Failed in creating pool (%s).') % 'POOL_ID' - self.assertEqual(msg, res) - lb_name = 'senlin-lb-%s' % cluster_name - self.oc.loadbalancer_create.assert_called_once_with( - 'SUBNET_ID', None, self.vip['address'], self.vip['admin_state_up'], - availability_zone=None, name=lb_name, flavor_id=None) - listener_name = 'senlin-listener-%s' % cluster_name - 
self.oc.listener_create.assert_called_once_with( - 'LB_ID', self.vip['protocol'], self.vip['protocol_port'], - self.vip['connection_limit'], self.vip['admin_state_up'], - name=listener_name) - pool_name = 'senlin-pool-%s' % cluster_name - self.oc.pool_create.assert_called_once_with( - self.pool['lb_method'], 'LISTENER_ID', self.pool['protocol'], - self.pool['session_persistence'], self.pool['admin_state_up'], - name=pool_name) - self.lb_driver._wait_for_lb_ready.assert_called_with('LB_ID') - self.lb_driver.lb_delete.assert_called_once_with( - loadbalancer='LB_ID', listener='LISTENER_ID', pool='POOL_ID') - - # Exception happens in pool_create - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [True, True, False] - self.oc.pool_create.side_effect = exception.InternalError( - code=500, message='CREATE FAILED') - status, res = self.lb_driver.lb_create(self.vip, self.pool, self.hm) - self.assertFalse(status) - msg = _('Failed in creating lb pool: CREATE FAILED.') - self.assertEqual(msg, res) - - def test_lb_create_healthmonitor_creation_failed(self): - lb_obj = mock.Mock() - listener_obj = mock.Mock() - pool_obj = mock.Mock() - hm_obj = mock.Mock() - lb_obj.id = 'LB_ID' - listener_obj.id = 'LISTENER_ID' - pool_obj.id = 'POOL_ID' - subnet_obj = mock.Mock() - subnet_obj.name = 'subnet' - subnet_obj.id = 'SUBNET_ID' - subnet_obj.network_id = 'NETWORK_ID' - hm_obj.id = 'HEALTHMONITOR_ID' - cluster_name = 'test_cluster' - - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [True, True, - True, False] - self.oc.loadbalancer_create.return_value = lb_obj - self.oc.listener_create.return_value = listener_obj - self.oc.pool_create.return_value = pool_obj - self.oc.healthmonitor_create.return_value = hm_obj - self.nc.subnet_get.return_value = subnet_obj - self.lb_driver.lb_delete = mock.Mock() - - status, res = self.lb_driver.lb_create(self.vip, self.pool, - cluster_name, self.hm) - self.assertFalse(status) - msg = _('Failed in creating health monitor (%s).') % 'HEALTHMONITOR_ID' - self.assertEqual(msg, res) - self.lb_driver.lb_delete.assert_called_once_with( - loadbalancer='LB_ID', listener='LISTENER_ID', pool='POOL_ID', - healthmonitor='HEALTHMONITOR_ID') - - # Exception happens in healthmonitor_create - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [True, True, True] - self.oc.healthmonitor_create.side_effect = exception.InternalError( - code=500, message='CREATE FAILED') - status, res = self.lb_driver.lb_create(self.vip, self.pool, - cluster_name, self.hm) - self.assertFalse(status) - msg = _('Failed in creating lb health monitor: CREATE FAILED.') - self.assertEqual(msg, res) - - @mock.patch.object(neutron_v2, 'NeutronClient') - def test_lb_find(self, mock_neutron): - self.lb_driver.lb_find("FAKELB") - self.oc.loadbalancer_get.assert_called_once_with( - "FAKELB", False, False) - - def test_lb_delete(self): - kwargs = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - - status, res = self.lb_driver.lb_delete(**kwargs) - self.assertTrue(status) - self.assertEqual('LB deletion succeeded', res) - self.oc.loadbalancer_delete.assert_called_once_with('LB_ID') - self.oc.listener_delete.assert_called_once_with('LISTENER_ID') - self.oc.pool_delete.assert_called_once_with('POOL_ID') - 
self.oc.healthmonitor_delete.assert_called_once_with('HM_ID') - calls = [mock.call('LB_ID') for i in range(1, 4)] - self.lb_driver._wait_for_lb_ready.assert_has_calls( - calls, any_order=False) - - def test_lb_healthmonitor_delete_internalerror(self): - kwargs = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - self.oc.healthmonitor_delete.side_effect = exception.InternalError( - code=500, message='DELETE FAILED') - status, res = self.lb_driver.lb_delete(**kwargs) - self.assertFalse(status) - msg = _('Failed in deleting healthmonitor: DELETE FAILED.') - self.assertEqual(msg, res) - - def test_lb_pool_delete_internalerror(self): - kwargs = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - self.oc.pool_delete.side_effect = exception.InternalError( - code=500, message='DELETE FAILED') - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - status, res = self.lb_driver.lb_delete(**kwargs) - self.assertFalse(status) - msg = _('Failed in deleting lb pool: DELETE FAILED.') - self.assertEqual(msg, res) - - def test_lb_listener_delete_internalerror(self): - kwargs = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - self.oc.listener_delete.side_effect = exception.InternalError( - code=500, message='DELETE FAILED') - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - status, res = self.lb_driver.lb_delete(**kwargs) - self.assertFalse(status) - msg = _('Failed in deleting listener: DELETE FAILED.') - self.assertEqual(msg, res) - - def test_lb_delete_no_physical_object(self): - kwargs = {'loadbalancer': 'LB_ID'} - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - - status, res = self.lb_driver.lb_delete(**kwargs) - self.assertTrue(status) - self.assertEqual('LB deletion succeeded', res) - self.oc.loadbalancer_delete.assert_called_once_with('LB_ID') - self.assertEqual(0, self.oc.healthmonitor_delete.call_count) - self.assertEqual(0, self.oc.pool_delete.call_count) - self.assertEqual(0, self.oc.listener_delete.call_count) - self.lb_driver._wait_for_lb_ready.assert_called_once_with( - 'LB_ID', ignore_not_found=True) - - @mock.patch.object(pb.Profile, 'load') - @mock.patch.object(oslo_context, 'get_current') - def test_member_add_succeeded(self, mock_get_current, mock_pb_load): - fake_context = mock.Mock() - mock_get_current.return_value = fake_context - node = mock.Mock() - lb_id = 'LB_ID' - pool_id = 'POOL_ID' - port = '80' - subnet = 'subnet' - subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID') - subnet_obj.ip_version = '4' - subnet_obj.name = 'subnet' - network_obj = mock.Mock(id='NETWORK_ID') - network_obj.name = 'network1' - member = mock.Mock(id='MEMBER_ID') - node_detail = { - 'name': 'node-01', - 'addresses': { - 'network1': [{'addr': 'ipaddr1_net1', 'version': '6'}, - {'addr': 'ipaddr2_net1', 'version': '4'}], - 'network2': [{'addr': 'ipaddr_net2', 'version': '4'}] - } - } - name = node_detail.get('name') - mock_pb_load.return_value.do_get_details.return_value = node_detail - - self.nc.subnet_get.return_value = subnet_obj - self.nc.network_get.return_value = network_obj - self.oc.pool_member_create.return_value = member - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - - res = 
self.lb_driver.member_add(node, lb_id, pool_id, port, subnet) - self.assertEqual('MEMBER_ID', res) - self.nc.subnet_get.assert_called_once_with(subnet) - self.nc.network_get.assert_called_once_with('NETWORK_ID') - # Make sure the ip matches with subnet ip_version - self.oc.pool_member_create.assert_called_once_with( - name, pool_id, 'ipaddr2_net1', port, 'SUBNET_ID') - self.lb_driver._wait_for_lb_ready.assert_has_calls( - [mock.call('LB_ID'), mock.call('LB_ID')]) - - @mock.patch.object(oslo_context, 'get_current') - def test_member_add_subnet_get_failed(self, mock_get_current): - self.nc.subnet_get.side_effect = exception.InternalError( - code=500, message="Can't find subnet") - res = self.lb_driver.member_add('node', 'LB_ID', 'POOL_ID', 80, - 'subnet') - self.assertIsNone(res) - - @mock.patch.object(oslo_context, 'get_current') - def test_member_add_network_get_failed(self, mock_get_current): - subnet_obj = mock.Mock() - subnet_obj.name = 'subnet' - subnet_obj.id = 'SUBNET_ID' - subnet_obj.network_id = 'NETWORK_ID' - - # Exception happens in network_get - self.nc.subnet_get.return_value = subnet_obj - self.nc.network_get.side_effect = exception.InternalError( - code=500, message="Can't find NETWORK_ID") - res = self.lb_driver.member_add('node', 'LB_ID', 'POOL_ID', 80, - 'subnet') - self.assertIsNone(res) - - @mock.patch.object(pb.Profile, 'load') - @mock.patch.object(nodem.Node, 'load') - @mock.patch.object(oslo_context, 'get_current') - def test_member_add_lb_unready_for_member_create(self, mock_get_current, - mock_load, mock_pb_load): - node = mock.Mock() - subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID') - subnet_obj.name = 'subnet' - subnet_obj.ip_version = '4' - network_obj = mock.Mock(id='NETWORK_ID') - network_obj.name = 'network1' - node_detail = { - 'name': 'node-01', - 'addresses': { - 'network1': [{'addr': 'ipaddr_net1', 'version': '4'}], - 'network2': [{'addr': 'ipaddr_net2', 'version': '4'}] - } - } - mock_load.return_value = node - mock_pb_load.return_value.do_get_details.return_value = node_detail - - # Exception happens in pool_member_create - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = False - self.nc.subnet_get.return_value = subnet_obj - self.nc.network_get.return_value = network_obj - self.oc.pool_member_create.side_effect = exception.InternalError( - code=500, message="CREATE FAILED") - res = self.lb_driver.member_add(node, 'LB_ID', 'POOL_ID', 80, - 'subnet') - self.assertIsNone(res) - self.lb_driver._wait_for_lb_ready.assert_called_once_with('LB_ID') - - @mock.patch.object(pb.Profile, 'load') - @mock.patch.object(nodem.Node, 'load') - @mock.patch.object(oslo_context, 'get_current') - def test_member_add_member_create_failed(self, mock_get_current, - mock_load, mock_pb_load): - node = mock.Mock() - subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID') - subnet_obj.name = 'subnet' - subnet_obj.ip_version = '4' - network_obj = mock.Mock(id='NETWORK_ID') - network_obj.name = 'network1' - node_detail = { - 'name': 'node-01', - 'addresses': { - 'network1': [{'addr': 'ipaddr_net1', 'version': '4'}], - 'network2': [{'addr': 'ipaddr_net2', 'version': '4'}] - } - } - mock_load.return_value = node - mock_pb_load.return_value.do_get_details.return_value = node_detail - - # Exception happens in pool_member_create - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - self.nc.subnet_get.return_value = subnet_obj - self.nc.network_get.return_value = 
network_obj - self.oc.pool_member_create.side_effect = exception.InternalError( - code=500, message="CREATE FAILED") - res = self.lb_driver.member_add(node, 'LB_ID', 'POOL_ID', 80, - 'subnet') - self.assertIsNone(res) - - @mock.patch.object(pb.Profile, 'load') - @mock.patch.object(nodem.Node, 'load') - @mock.patch.object(oslo_context, 'get_current') - def test_member_add_ip_version_match_failed(self, mock_get_current, - mock_load, mock_pb_load): - node = mock.Mock() - subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID') - subnet_obj.name = 'subnet' - subnet_obj.ip_version = '4' - network_obj = mock.Mock(id='NETWORK_ID') - network_obj.name = 'network1' - node_detail = { - 'name': 'node-01', - 'addresses': { - 'network1': [{'addr': 'ipaddr_net1', 'version': '6'}], - 'network2': [{'addr': 'ipaddr_net2', 'version': '6'}] - } - } - mock_load.return_value = node - mock_pb_load.return_value.do_get_details.return_value = node_detail - - # Node does not match with subnet ip_version - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - self.nc.subnet_get.return_value = subnet_obj - self.nc.network_get.return_value = network_obj - self.oc.pool_member_create = mock.Mock(id='MEMBER_ID') - res = self.lb_driver.member_add(node, 'LB_ID', 'POOL_ID', 80, - 'subnet') - self.assertIsNone(res) - - @mock.patch.object(pb.Profile, 'load') - @mock.patch.object(nodem.Node, 'load') - @mock.patch.object(oslo_context, 'get_current') - def test_member_add_wait_for_lb_timeout(self, mock_get_current, mock_load, - mock_pb_load): - node = mock.Mock() - subnet_obj = mock.Mock(id='SUBNET_ID', network_id='NETWORK_ID') - subnet_obj.name = 'subnet' - subnet_obj.ip_version = '4' - network_obj = mock.Mock(id='NETWORK_ID') - network_obj.name = 'network1' - node_detail = { - 'name': 'node-01', - 'addresses': { - 'network1': [{'addr': 'ipaddr_net1', 'version': '4'}], - 'network2': [{'addr': 'ipaddr_net2', 'version': '4'}] - } - } - mock_load.return_value = node - mock_pb_load.return_value.do_get_details.return_value = node_detail - - # Wait for lb ready timeout after creating member - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [True, False] - self.nc.subnet_get.return_value = subnet_obj - self.nc.network_get.return_value = network_obj - res = self.lb_driver.member_add(node, 'LB_ID', 'POOL_ID', 80, - 'subnet') - self.assertIsNone(res) - - @mock.patch.object(pb.Profile, 'load') - @mock.patch.object(nodem.Node, 'load') - @mock.patch.object(oslo_context, 'get_current') - def test_member_add_node_not_in_subnet(self, mock_get_current, mock_load, - mock_pb_load): - node = mock.Mock() - lb_id = 'LB_ID' - pool_id = 'POOL_ID' - port = '80' - subnet = 'subnet' - network_obj = mock.Mock(id='NETWORK_ID') - network_obj.name = 'network3' - node_detail = { - 'name': 'node-01', - 'addresses': { - 'network1': [{'addr': 'ipaddr_net1'}], - 'network2': [{'addr': 'ipaddr_net2'}] - } - } - mock_load.return_value = node - mock_pb_load.return_value.do_get_details.return_value = node_detail - - self.nc.network_get.return_value = network_obj - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - - res = self.lb_driver.member_add(node, lb_id, pool_id, port, subnet) - self.assertIsNone(res) - - def test_member_remove_succeeded(self): - lb_id = 'LB_ID' - pool_id = 'POOL_ID' - member_id = 'MEMBER_ID' - - self.lb_driver._wait_for_lb_ready = mock.Mock() - 
self.lb_driver._wait_for_lb_ready.return_value = True - - res = self.lb_driver.member_remove(lb_id, pool_id, member_id) - self.assertTrue(res) - self.oc.pool_member_delete.assert_called_once_with(pool_id, member_id) - self.lb_driver._wait_for_lb_ready.assert_has_calls( - [mock.call(lb_id, ignore_not_found=True), - mock.call(lb_id, ignore_not_found=True)]) - - def test_member_remove_lb_unready_for_member_delete(self): - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = False - - res = self.lb_driver.member_remove('LB_ID', 'POOL_ID', 'MEMBER_ID') - self.assertFalse(res) - self.lb_driver._wait_for_lb_ready.assert_has_calls( - [mock.call('LB_ID', ignore_not_found=True), - mock.call('LB_ID', ignore_not_found=True)]) - - def test_member_remove_member_delete_failed(self): - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.return_value = True - self.oc.pool_member_delete.side_effect = exception.InternalError( - code=500, message='') - - res = self.lb_driver.member_remove('LB_ID', 'POOL_ID', 'MEMBER_ID') - self.assertFalse(res) - self.oc.pool_member_delete.assert_called_once_with('POOL_ID', - 'MEMBER_ID') - - def test_member_remove_wait_for_lb_timeout(self): - self.lb_driver._wait_for_lb_ready = mock.Mock() - self.lb_driver._wait_for_lb_ready.side_effect = [True, False] - self.oc.pool_member_delete.side_effect = None - - res = self.lb_driver.member_remove('LB_ID', 'POOL_ID', 'MEMBER_ID') - self.assertIsNone(res) - self.lb_driver._wait_for_lb_ready.assert_has_calls( - [mock.call('LB_ID', ignore_not_found=True), - mock.call('LB_ID', ignore_not_found=True)]) diff --git a/senlin/tests/unit/drivers/test_mistral_v2.py b/senlin/tests/unit/drivers/test_mistral_v2.py deleted file mode 100644 index f01232828..000000000 --- a/senlin/tests/unit/drivers/test_mistral_v2.py +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
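The member_remove tests above pin down the retry contract of the LBaaS driver's readiness polling: keep checking the load balancer until its provisioning and operating statuses settle, treat a vanished LB as success only when ignore_not_found is set, and report failure on timeout. As a minimal, self-contained sketch of that polling pattern (illustrative only, not the retired driver; the get_lb, timeout and interval names are hypothetical):

import time


def wait_for_lb_ready(get_lb, lb_id, timeout=10, interval=2,
                      ignore_not_found=False):
    # Poll until the LB reports ACTIVE/ONLINE, or the timeout expires.
    # get_lb is any callable returning an object with provisioning_status
    # and operating_status attributes, or None if the LB no longer exists.
    deadline = time.time() + timeout
    while time.time() < deadline:
        lb = get_lb(lb_id)
        if lb is None:
            # A missing LB only counts as "ready" on delete paths.
            return ignore_not_found
        if (lb.provisioning_status == 'ACTIVE'
                and lb.operating_status == 'ONLINE'):
            return True
        time.sleep(interval)
    return False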
- -from unittest import mock - -from senlin.drivers.os import mistral_v2 -from senlin.drivers import sdk -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestMistralV2(base.SenlinTestCase): - - def setUp(self): - super(TestMistralV2, self).setUp() - - self.ctx = utils.dummy_context() - self.conn_params = self.ctx.to_dict() - self.mock_conn = mock.Mock() - self.mock_create = self.patchobject( - sdk, 'create_connection', - return_value=self.mock_conn) - self.workflow = self.mock_conn.workflow - - def test_init(self): - d = mistral_v2.MistralClient(self.conn_params) - - self.mock_create.assert_called_once_with(self.conn_params, - service_type='workflow') - self.assertEqual(self.mock_conn, d.conn) - - def test_workflow_find(self): - d = mistral_v2.MistralClient(self.conn_params) - - d.workflow_find('foo') - - self.workflow.find_workflow.assert_called_once_with( - 'foo', ignore_missing=True) - self.workflow.find_workflow.reset_mock() - - d.workflow_find('foo', True) - - self.workflow.find_workflow.assert_called_once_with( - 'foo', ignore_missing=True) - self.workflow.find_workflow.reset_mock() - - d.workflow_find('foo', False) - - self.workflow.find_workflow.assert_called_once_with( - 'foo', ignore_missing=False) - - def test_workflow_create(self): - d = mistral_v2.MistralClient(self.conn_params) - attrs = { - 'definition': 'fake_definition', - 'scope': 'private', - } - - d.workflow_create('fake_definition', 'private') - - self.workflow.create_workflow.assert_called_once_with(**attrs) - - def test_workflow_delete(self): - d = mistral_v2.MistralClient(self.conn_params) - - d.workflow_delete('foo', True) - - self.workflow.delete_workflow.assert_called_once_with( - 'foo', ignore_missing=True) - self.workflow.delete_workflow.reset_mock() - - d.workflow_delete('foo', False) - - self.workflow.delete_workflow.assert_called_once_with( - 'foo', ignore_missing=False) - - def test_execution_create(self): - d = mistral_v2.MistralClient(self.conn_params) - attrs = { - 'workflow_name': 'workflow_name', - 'input': 'input' - } - d.execution_create('workflow_name', 'input') - self.workflow.create_execution.assert_called_once_with(**attrs) - - def test_execution_delete(self): - d = mistral_v2.MistralClient(self.conn_params) - - d.execution_delete('goo', True) - - self.workflow.delete_execution.assert_called_once_with( - 'goo', ignore_missing=True) - - self.workflow.delete_execution.reset_mock() - - d.execution_delete('goo', False) - - self.workflow.delete_execution.assert_called_once_with( - 'goo', ignore_missing=False) - - def test_wait_for_execution(self): - self.workflow.find_execution.return_value = 'exn' - d = mistral_v2.MistralClient(self.conn_params) - - d.wait_for_execution('exn', 'STATUS1', ['STATUS2'], 5, 10) - - self.workflow.wait_for_status.assert_called_once_with( - 'exn', 'STATUS1', ['STATUS2'], 5, 10) diff --git a/senlin/tests/unit/drivers/test_neutron_v2.py b/senlin/tests/unit/drivers/test_neutron_v2.py deleted file mode 100644 index b1d85dbf8..000000000 --- a/senlin/tests/unit/drivers/test_neutron_v2.py +++ /dev/null @@ -1,158 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_utils import uuidutils
-
-from senlin.drivers.os import neutron_v2
-from senlin.drivers import sdk
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-class TestNeutronV2Driver(base.SenlinTestCase):
-
-    def setUp(self):
-        super(TestNeutronV2Driver, self).setUp()
-        self.context = utils.dummy_context()
-        self.conn_params = self.context.to_dict()
-        self.conn = mock.Mock()
-        with mock.patch.object(sdk, 'create_connection') as mock_create_conn:
-            mock_create_conn.return_value = self.conn
-            self.nc = neutron_v2.NeutronClient(self.context)
-
-    @mock.patch.object(sdk, 'create_connection')
-    def test_init(self, mock_create_connection):
-        params = self.conn_params
-        neutron_v2.NeutronClient(params)
-        mock_create_connection.assert_called_once_with(params,
-                                                       service_type='network')
-
-    def test_network_get_with_uuid(self):
-        net_id = uuidutils.generate_uuid()
-        network_obj = mock.Mock()
-
-        self.conn.network.find_network.return_value = network_obj
-        res = self.nc.network_get(net_id)
-        self.conn.network.find_network.assert_called_once_with(net_id, False)
-        self.assertEqual(network_obj, res)
-
-    def test_network_get_with_name(self):
-        net_id = 'network_identifier'
-        net1 = mock.Mock()
-        net2 = mock.Mock()
-        self.conn.network.networks.return_value = [net1, net2]
-
-        res = self.nc.network_get(net_id)
-        self.assertEqual(0, self.conn.network.find_network.call_count)
-        self.conn.network.networks.assert_called_once_with(name=net_id)
-        self.assertEqual(net1, res)
-
-    def test_port_find(self):
-        port_id = 'port_identifier'
-        port_obj = mock.Mock()
-
-        self.conn.network.find_port.return_value = port_obj
-        res = self.nc.port_find(port_id)
-        self.conn.network.find_port.assert_called_once_with(port_id, False)
-        self.assertEqual(port_obj, res)
-
-    def test_security_group_find(self):
-        sg_id = 'sg_identifier'
-        sg_obj = mock.Mock()
-
-        self.conn.network.find_security_group.return_value = sg_obj
-        res = self.nc.security_group_find(sg_id)
-        self.conn.network.find_security_group.assert_called_once_with(
-            sg_id, False)
-        self.assertEqual(sg_obj, res)
-
-    def test_security_group_find_with_project_id(self):
-        sg_id = 'sg_identifier'
-        sg_obj = mock.Mock()
-
-        self.conn.network.find_security_group.return_value = sg_obj
-        res = self.nc.security_group_find(sg_id, project_id='fake_project_id')
-        self.conn.network.find_security_group.assert_called_once_with(
-            sg_id, False, project_id='fake_project_id')
-        self.assertEqual(sg_obj, res)
-
-    def test_subnet_get(self):
-        subnet_id = 'subnet_identifier'
-        subnet_obj = mock.Mock()
-
-        self.conn.network.find_subnet.return_value = subnet_obj
-        res = self.nc.subnet_get(subnet_id)
-        self.conn.network.find_subnet.assert_called_once_with(subnet_id, False)
-        self.assertEqual(subnet_obj, res)
-
-    def test_port_create(self):
-        port_attr = {
-            'network_id': 'foo'
-        }
-        self.nc.port_create(**port_attr)
-        self.conn.network.create_port.assert_called_once_with(
-            network_id='foo')
-
-    def test_port_delete(self):
-        self.nc.port_delete(port='foo')
-        self.conn.network.delete_port.assert_called_once_with(
port='foo', ignore_missing=True) - - def test_port_update(self): - attr = { - 'name': 'new_name' - } - self.nc.port_update('fake_port', **attr) - self.conn.network.update_port.assert_called_once_with( - 'fake_port', **attr) - - def test_floatingip_find(self): - floatingip_id = 'fake_id' - fip_obj = mock.Mock() - - self.conn.network.find_ip.return_value = fip_obj - res = self.nc.floatingip_find(floatingip_id) - self.conn.network.find_ip.assert_called_once_with( - floatingip_id, ignore_missing=False) - self.assertEqual(fip_obj, res) - - def test_floatingip_list_by_port_id(self): - port_id = 'port_id' - fip_obj_iter = iter([mock.Mock()]) - - self.conn.network.ips.return_value = fip_obj_iter - res = self.nc.floatingip_list(port=port_id) - self.conn.network.ips.assert_called_once_with(port_id=port_id) - self.assertEqual(1, len(res)) - - def test_floatingip_create(self): - attr = { - 'network_id': 'foo' - } - self.nc.floatingip_create(**attr) - self.conn.network.create_ip.assert_called_once_with( - network_id='foo') - - def test_floatingip_delete(self): - self.nc.floatingip_delete(floating_ip='foo') - self.conn.network.delete_ip.assert_called_once_with( - 'foo', ignore_missing=True) - - def test_floatingip_update(self): - attr = { - 'port_id': 'fake_port' - } - self.nc.floatingip_update('fake_floatingip', **attr) - self.conn.network.update_ip.assert_called_once_with( - 'fake_floatingip', **attr) diff --git a/senlin/tests/unit/drivers/test_nova_v2.py b/senlin/tests/unit/drivers/test_nova_v2.py deleted file mode 100644 index 1919be371..000000000 --- a/senlin/tests/unit/drivers/test_nova_v2.py +++ /dev/null @@ -1,645 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
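All of these driver test classes follow one delegation pattern: patch sdk.create_connection to return a mock connection, instantiate the thin driver wrapper, call a wrapper method, and assert that the matching SDK proxy method was invoked with the expected arguments. A self-contained sketch of that pattern (illustrative; FakeDriver and its service_type are hypothetical stand-ins, not Senlin code):

from unittest import mock


class FakeDriver(object):
    # A thin wrapper in the style of the senlin.drivers.os clients.
    def __init__(self, params, create_connection):
        self.conn = create_connection(params, service_type='compute')

    def server_get(self, server_id):
        return self.conn.compute.get_server(server_id)


def test_server_get_delegates():
    mock_create = mock.Mock()
    conn = mock_create.return_value

    d = FakeDriver({'k': 'v'}, mock_create)
    res = d.server_get('fake-id')

    # The wrapper forwards both construction and the call itself.
    mock_create.assert_called_once_with({'k': 'v'}, service_type='compute')
    conn.compute.get_server.assert_called_once_with('fake-id')
    assert res is conn.compute.get_server.return_value


test_server_get_delegates()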
- -from openstack import exceptions as sdk_exc -from unittest import mock - -from oslo_config import cfg - -from senlin.common import exception as exc -from senlin.drivers.os import nova_v2 -from senlin.drivers import sdk -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestNovaV2(base.SenlinTestCase): - - def setUp(self): - super(TestNovaV2, self).setUp() - - self.ctx = utils.dummy_context() - self.conn_params = self.ctx.to_dict() - self.mock_conn = mock.Mock() - self.mock_create = self.patchobject( - sdk, 'create_connection', - return_value=self.mock_conn) - self.compute = self.mock_conn.compute - - def test_init(self): - d = nova_v2.NovaClient(self.conn_params) - self.mock_create.assert_called_once_with(self.conn_params, - service_type='compute') - self.assertEqual(self.mock_conn, d.conn) - - def test_flavor_find(self): - d = nova_v2.NovaClient(self.conn_params) - d.flavor_find('foo') - self.compute.find_flavor.assert_called_once_with('foo', False) - self.compute.find_flavor.reset_mock() - - d.flavor_find('foo', True) - self.compute.find_flavor.assert_called_once_with('foo', True) - self.compute.find_flavor.reset_mock() - - d.flavor_find('foo', False) - self.compute.find_flavor.assert_called_once_with('foo', False) - - def test_keypair_create(self): - d = nova_v2.NovaClient(self.conn_params) - d.keypair_create(name='foo') - self.compute.create_keypair.assert_called_once_with(name='foo') - - def test_keypair_delete(self): - d = nova_v2.NovaClient(self.conn_params) - d.keypair_delete('foo') - self.compute.delete_keypair.assert_called_once_with('foo', False) - self.compute.delete_keypair.reset_mock() - - d.keypair_delete('foo', True) - self.compute.delete_keypair.assert_called_once_with('foo', True) - self.compute.delete_keypair.reset_mock() - - d.keypair_delete('foo', False) - self.compute.delete_keypair.assert_called_once_with('foo', False) - - def test_keypair_find(self): - d = nova_v2.NovaClient(self.conn_params) - d.keypair_find('foo') - self.compute.find_keypair.assert_called_once_with('foo', False) - self.compute.find_keypair.reset_mock() - - d.keypair_find('foo', True) - self.compute.find_keypair.assert_called_once_with('foo', True) - self.compute.find_keypair.reset_mock() - - d.keypair_find('foo', False) - self.compute.find_keypair.assert_called_once_with('foo', False) - - def test_server_create(self): - d = nova_v2.NovaClient(self.conn_params) - d.server_create(name='foo') - self.compute.create_server.assert_called_once_with(name='foo') - - def test_server_get(self): - d = nova_v2.NovaClient(self.conn_params) - d.server_get('foo') - self.compute.get_server.assert_called_once_with('foo') - - def test_server_update(self): - d = nova_v2.NovaClient(self.conn_params) - attrs = {'mem': 2} - d.server_update('fakeid', **attrs) - self.compute.update_server.assert_called_once_with('fakeid', **attrs) - - def test_server_delete(self): - d = nova_v2.NovaClient(self.conn_params) - d.server_delete('foo', True) - self.compute.delete_server.assert_called_once_with( - 'foo', ignore_missing=True) - - def test_server_force_delete(self): - d = nova_v2.NovaClient(self.conn_params) - d.server_force_delete('foo', True) - self.compute.delete_server.assert_called_once_with( - 'foo', ignore_missing=True, force=True) - - def test_server_rebuild(self): - d = nova_v2.NovaClient(self.conn_params) - attrs = { - 'personality': '123', - 'metadata': {'k1': 'v1'} - } - d.server_rebuild('sid', 'new_image', 'new_name', 'new_pass', **attrs) - attrs.update({ - "name": 
'new_name', - "admin_password": 'new_pass' - }) - self.compute.rebuild_server.assert_called_once_with( - 'sid', image='new_image', **attrs) - - def test_server_resize(self): - d = nova_v2.NovaClient(self.conn_params) - - res = d.server_resize('fakeid', 'new_flavor') - - self.assertEqual(d.conn.compute.resize_server.return_value, res) - d.conn.compute.resize_server.assert_called_once_with( - 'fakeid', 'new_flavor') - - def test_server_resize_confirm(self): - d = nova_v2.NovaClient(self.conn_params) - - res = d.server_resize_confirm('fakeid') - - self.assertEqual(d.conn.compute.confirm_server_resize.return_value, - res) - d.conn.compute.confirm_server_resize.assert_called_once_with('fakeid') - - def test_server_resize_revert(self): - d = nova_v2.NovaClient(self.conn_params) - - res = d.server_resize_revert('fakeid') - - self.assertEqual(d.conn.compute.revert_server_resize.return_value, res) - d.conn.compute.revert_server_resize.assert_called_once_with('fakeid') - - def test_server_reboot(self): - d = nova_v2.NovaClient(self.conn_params) - - res = d.server_reboot('fakeid', 'soft') - - target = d.conn.compute.reboot_server - self.assertEqual(target.return_value, res) - target.assert_called_once_with('fakeid', 'soft') - - def test_server_change_password(self): - d = nova_v2.NovaClient(self.conn_params) - - res = d.server_change_password('fakeid', 'new_password') - - target = d.conn.compute.change_server_password - self.assertEqual(target.return_value, res) - target.assert_called_once_with('fakeid', 'new_password') - - def test_server_pause(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.pause_server - - res = d.server_pause(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_unpause(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.unpause_server - - res = d.server_unpause(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_suspend(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.suspend_server - - res = d.server_suspend(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_resume(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.resume_server - - res = d.server_resume(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_lock(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.lock_server - - res = d.server_lock(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_unlock(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.unlock_server - - res = d.server_unlock(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_start(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.start_server - - res = d.server_start(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_stop(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.stop_server - - res = d.server_stop(server) - - 
self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_rescue(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.rescue_server - - res = d.server_rescue(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server, admin_pass=None, image_ref=None) - - def test_server_rescue_with_params(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.rescue_server - - res = d.server_rescue(server, admin_pass='PASS', image_ref='IMAGE') - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server, admin_pass='PASS', - image_ref='IMAGE') - - def test_server_unrescue(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.unrescue_server - - res = d.server_unrescue(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_migrate(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.migrate_server - - res = d.server_migrate(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server) - - def test_server_evacuate(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.evacuate_server - - res = d.server_evacuate(server) - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server, host=None, admin_pass=None, - force=None) - - def test_server_evacuate_with_params(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.evacuate_server - - res = d.server_evacuate(server, host='HOST', admin_pass='PASS', - force='True') - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server, host='HOST', admin_pass='PASS', - force='True') - - def test_server_create_image(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - target = d.conn.compute.create_server_image - - res = d.server_create_image(server, 'snapshot', metadata='meta') - - self.assertEqual(target.return_value, res) - target.assert_called_once_with(server, 'snapshot', 'meta') - - def test_wait_for_server(self): - self.compute.find_server.return_value = 'foo' - - d = nova_v2.NovaClient(self.conn_params) - d.wait_for_server('foo', 'STATUS1', ['STATUS2'], 5, 10) - self.compute.wait_for_server.assert_called_once_with( - 'foo', status='STATUS1', failures=['STATUS2'], interval=5, wait=10) - - def test_wait_for_server_default_value(self): - self.compute.find_server.return_value = 'foo' - - d = nova_v2.NovaClient(self.conn_params) - d.wait_for_server('foo', timeout=10) - self.compute.wait_for_server.assert_called_once_with( - 'foo', status='ACTIVE', failures=['ERROR'], interval=2, wait=10) - - def test_wait_for_server_with_default_timeout(self): - self.compute.find_server.return_value = 'foo' - timeout = cfg.CONF.default_nova_timeout - - d = nova_v2.NovaClient(self.conn_params) - d.wait_for_server('foo') - self.compute.wait_for_server.assert_called_once_with( - 'foo', status='ACTIVE', failures=['ERROR'], interval=2, - wait=timeout) - - def test_wait_for_server_delete(self): - self.compute.find_server.return_value = 'FOO' - - d = nova_v2.NovaClient(self.conn_params) - d.wait_for_server_delete('foo', 120) - self.compute.find_server.assert_called_once_with('foo', True) - self.compute.wait_for_delete.assert_called_once_with('FOO', 
wait=120) - - def test_wait_for_server_delete_with_default_timeout(self): - cfg.CONF.set_override('default_nova_timeout', 360) - self.compute.find_server.return_value = 'FOO' - - d = nova_v2.NovaClient(self.conn_params) - d.wait_for_server_delete('foo') - self.compute.find_server.assert_called_once_with('foo', True) - self.compute.wait_for_delete.assert_called_once_with('FOO', wait=360) - - def test_wait_for_server_delete_server_doesnt_exist(self): - self.compute.find_server.return_value = None - - d = nova_v2.NovaClient(self.conn_params) - res = d.wait_for_server_delete('foo') - self.assertIsNone(res) - - def test_server_interface_create(self): - server = mock.Mock() - d = nova_v2.NovaClient(self.conn_params) - d.server_interface_create(server, port_id='fake-port-id') - self.compute.create_server_interface.assert_called_once_with( - server, port_id='fake-port-id') - - def test_server_interface_list(self): - d = nova_v2.NovaClient(self.conn_params) - server = mock.Mock() - d.server_interface_list(server) - self.compute.server_interfaces.assert_called_once_with(server) - self.compute.server_interfaces.reset_mock() - - d.server_interface_list(server, k='v') - self.compute.server_interfaces.assert_called_once_with(server, k='v') - self.compute.server_interfaces.reset_mock() - - def test_server_interface_delete(self): - server = mock.Mock() - d = nova_v2.NovaClient(self.conn_params) - d.server_interface_delete('foo', server, True) - self.compute.delete_server_interface.assert_called_once_with( - 'foo', server, True) - self.compute.delete_server_interface.reset_mock() - - d.server_interface_delete('foo', server, False) - self.compute.delete_server_interface.assert_called_once_with( - 'foo', server, False) - self.compute.delete_server_interface.reset_mock() - - d.server_interface_delete('foo', server) - self.compute.delete_server_interface.assert_called_once_with( - 'foo', server, True) - - def test_server_floatingip_associate(self): - server = mock.Mock() - d = nova_v2.NovaClient(self.conn_params) - d.server_floatingip_associate(server, 'fake_floatingip') - self.compute.add_floating_ip_to_server.assert_called_once_with( - server, 'fake_floatingip' - ) - - def test_server_floatingip_disassociate(self): - server = mock.Mock() - d = nova_v2.NovaClient(self.conn_params) - d.server_floatingip_disassociate(server, 'fake_floatingip') - self.compute.remove_floating_ip_from_server.assert_called_once_with( - server, 'fake_floatingip' - ) - - def test_server_metadata_get(self): - server = mock.Mock() - res_server = mock.Mock() - res_server.metadata = { - 'k1': 'v1' - } - self.compute.get_server_metadata.return_value = res_server - - d = nova_v2.NovaClient(self.conn_params) - res = d.server_metadata_get(server) - self.compute.get_server_metadata.assert_called_once_with(server) - self.assertEqual({'k1': 'v1'}, res) - - def test_server_metadata_update(self): - server = mock.Mock() - res_server = mock.Mock() - res_server.metadata = { - 'k1': 'v1', - 'k2': 'v2' - } - self.compute.get_server_metadata.return_value = res_server - - d = nova_v2.NovaClient(self.conn_params) - d.server_metadata_update(server, {'k3': 'v3', 'k4': 'v4'}) - self.compute.set_server_metadata.assert_has_calls( - [mock.call(server, k3='v3'), mock.call(server, k4='v4')], - any_order=True) - self.compute.delete_server_metadata.assert_has_calls( - [mock.call(server, ['k1']), mock.call(server, ['k2'])], - any_order=True) - - def test_server_metadata_update_forbidden(self): - server = mock.Mock() - res_server = mock.Mock() - res_server.metadata 
= { - 'k1': 'v1', - 'forbidden_key': 'forbidden_data', - 'k2': 'v2' - } - self.compute.get_server_metadata.return_value = res_server - self.compute.delete_server_metadata.side_effect = [ - None, sdk_exc.HttpException(http_status=403), None] - self.compute.set_server_metadata.side_effect = [ - None, sdk_exc.HttpException(http_status=403), None] - - d = nova_v2.NovaClient(self.conn_params) - d.server_metadata_update(server, {'k3': 'v3', 'k4': 'v4', 'k5': 'v5'}) - self.compute.set_server_metadata.assert_has_calls( - [mock.call(server, k3='v3'), mock.call(server, k4='v4'), - mock.call(server, k5='v5')], any_order=True) - self.compute.delete_server_metadata.assert_has_calls( - [mock.call(server, ['k1']), mock.call(server, ['forbidden_key']), - mock.call(server, ['k2'])], any_order=True) - - def test_server_metadata_delete(self): - server = mock.Mock() - d = nova_v2.NovaClient(self.conn_params) - d.server_metadata_delete(server, 'k1') - self.compute.delete_server_metadata.assert_called_once_with( - server, 'k1') - - def test_validate_azs(self): - nc = nova_v2.NovaClient(self.conn_params) - - az1 = mock.Mock() - az1.name = 'AZ1' - az1.state = {'available': True} - az2 = mock.Mock() - az2.name = 'AZ2' - az2.state = {'available': True} - az3 = mock.Mock() - az3.name = 'AZ3' - az3.state = {'available': True} - fake_azs = [az1, az2, az3] - - self.patchobject(nc, 'availability_zone_list', return_value=fake_azs) - - result = nc.validate_azs(['AZ1', 'AZ2', 'AZ5']) - self.assertEqual(['AZ1', 'AZ2'], result) - - def test_server_group_create(self): - d = nova_v2.NovaClient(self.conn_params) - d.server_group_create(name='sg') - self.compute.create_server_group.assert_called_once_with(name='sg') - - def test_server_group_delete(self): - d = nova_v2.NovaClient(self.conn_params) - d.server_group_delete('sg', True) - self.compute.delete_server_group.assert_called_once_with( - 'sg', ignore_missing=True) - self.compute.delete_server_group.reset_mock() - - d.server_group_delete('sg', False) - self.compute.delete_server_group.assert_called_once_with( - 'sg', ignore_missing=False) - self.compute.delete_server_group.reset_mock() - - d.server_group_delete('sg') - self.compute.delete_server_group.assert_called_once_with( - 'sg', ignore_missing=True) - - def test_server_group_find(self): - d = nova_v2.NovaClient(self.conn_params) - d.server_group_find('sg') - self.compute.find_server_group.assert_called_once_with( - 'sg', ignore_missing=True) - self.compute.find_server_group.reset_mock() - - d.server_group_find('sg', True) - self.compute.find_server_group.assert_called_once_with( - 'sg', ignore_missing=True) - self.compute.find_server_group.reset_mock() - - d.server_group_find('sg', False) - self.compute.find_server_group.assert_called_once_with( - 'sg', ignore_missing=False) - - def test_hypervisor_list(self): - d = nova_v2.NovaClient(self.conn_params) - d.hypervisor_list() - self.compute.hypervisors.assert_called_once_with() - self.compute.hypervisors.reset_mock() - - d.hypervisor_list(k='v') - self.compute.hypervisors.assert_called_once_with(k='v') - self.compute.hypervisors.reset_mock() - - def test_hypervisor_get(self): - d = nova_v2.NovaClient(self.conn_params) - d.hypervisor_get('k') - self.compute.get_hypervisor.assert_called_once_with('k') - - def test_hypervisor_find(self): - d = nova_v2.NovaClient(self.conn_params) - self.compute.get_hypervisor.return_value = mock.Mock() - - d.hypervisor_find('k') - - self.compute.get_hypervisor.assert_called_once_with('k') - - def test_hypervisor_find_name(self): - d = 
nova_v2.NovaClient(self.conn_params) - self.compute.get_hypervisor.side_effect = sdk_exc.HttpException - fake_result = mock.Mock() - fake_result.name = 'FAKE_HV' - self.compute.hypervisors.return_value = [mock.Mock(name='not_it'), - fake_result] - - r = d.hypervisor_find('FAKE_HV') - - self.compute.get_hypervisor.assert_called_once_with('FAKE_HV') - self.compute.hypervisors.assert_called_once_with( - hypervisor_hostname_pattern='FAKE_HV') - self.assertEqual(r, fake_result) - - def test_hypervisor_find_name_duplicate(self): - d = nova_v2.NovaClient(self.conn_params) - self.compute.get_hypervisor.side_effect = sdk_exc.HttpException - fake_result = mock.Mock() - fake_result.name = 'FAKE_HV' - self.compute.hypervisors.return_value = [fake_result, fake_result] - - self.assertRaises(exc.InternalError, d.hypervisor_find, 'FAKE_HV') - - self.compute.get_hypervisor.assert_called_once_with('FAKE_HV') - self.compute.hypervisors.assert_called_once_with( - hypervisor_hostname_pattern='FAKE_HV') - - def test_hypervisor_find_name_ignore_missing(self): - d = nova_v2.NovaClient(self.conn_params) - self.compute.get_hypervisor.side_effect = sdk_exc.HttpException - self.compute.hypervisors.return_value = [mock.Mock(name='not_it')] - - r = d.hypervisor_find('FAKE_HV', True) - - self.compute.get_hypervisor.assert_called_once_with('FAKE_HV') - self.compute.hypervisors.assert_called_once_with( - hypervisor_hostname_pattern='FAKE_HV') - self.assertIsNone(r) - - def test_hypervisor_find_name_not_found(self): - d = nova_v2.NovaClient(self.conn_params) - self.compute.get_hypervisor.side_effect = sdk_exc.HttpException - self.compute.hypervisors.return_value = [mock.Mock(name='not_it')] - - self.assertRaises(exc.InternalError, d.hypervisor_find, 'FAKE_HV') - - self.compute.get_hypervisor.assert_called_once_with('FAKE_HV') - self.compute.hypervisors.assert_called_once_with( - hypervisor_hostname_pattern='FAKE_HV') - - def test_service_list(self): - d = nova_v2.NovaClient(self.conn_params) - d.service_list() - self.compute.services.assert_called_once() - - def test_service_force_down(self): - d = nova_v2.NovaClient(self.conn_params) - services = d.service_list() - service = services.next() - d.service_force_down(service) - self.compute.force_service_down.assert_called_once_with( - service, service.host, service.binary) - - def test_create_volume_attachment(self): - server = mock.Mock() - d = nova_v2.NovaClient(self.conn_params) - kwargs = { - 'serverId': server, - 'volumeId': 'fake_volume', - } - d.create_volume_attachment(server, **kwargs) - - def test_delete_volume_attachment(self): - server = mock.Mock() - d = nova_v2.NovaClient(self.conn_params) - d.delete_volume_attachment('fake_volume', server, ignore_missing=True) diff --git a/senlin/tests/unit/drivers/test_octavia_v2.py b/senlin/tests/unit/drivers/test_octavia_v2.py deleted file mode 100644 index 491b0f397..000000000 --- a/senlin/tests/unit/drivers/test_octavia_v2.py +++ /dev/null @@ -1,342 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
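The octavia_v2 tests that follow mirror the nova_v2 tests above: each driver method is a thin forwarder to an openstacksdk proxy, and the delete helpers default ignore_missing to True, which the tests pin down by asserting on the forwarded call. Below is a minimal, self-contained sketch of that pattern; LoadBalancerDriver and its method names are illustrative stand-ins, not Senlin's actual classes.

import unittest
from unittest import mock


class LoadBalancerDriver(object):
    """Thin wrapper over an SDK connection (illustrative stand-in)."""

    def __init__(self, conn):
        self.conn = conn

    def loadbalancer_delete(self, lb_id, ignore_missing=True):
        # Forward to the proxy; callers get ignore_missing=True by default.
        return self.conn.load_balancer.delete_load_balancer(
            lb_id, ignore_missing=ignore_missing)


class TestLoadBalancerDriver(unittest.TestCase):

    def test_delete_defaults_ignore_missing(self):
        conn = mock.Mock()
        d = LoadBalancerDriver(conn)
        d.loadbalancer_delete('ID1')
        conn.load_balancer.delete_load_balancer.assert_called_once_with(
            'ID1', ignore_missing=True)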
- -from unittest import mock - -from senlin.drivers.os import octavia_v2 -from senlin.drivers import sdk -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestOctaviaV2Driver(base.SenlinTestCase): - - def setUp(self): - super(TestOctaviaV2Driver, self).setUp() - self.context = utils.dummy_context() - self.conn_params = self.context.to_dict() - self.conn = mock.Mock() - with mock.patch.object(sdk, 'create_connection') as mock_create_conn: - mock_create_conn.return_value = self.conn - self.oc = octavia_v2.OctaviaClient(self.context) - - @mock.patch.object(sdk, 'create_connection') - def test_init(self, mock_create_connection): - params = self.conn_params - octavia_v2.OctaviaClient(params) - mock_create_connection.assert_called_once_with( - params, - service_type='load-balancer') - - def test_loadbalancer_get(self): - lb_id = 'loadbalancer_identifier' - loadbalancer_obj = mock.Mock() - - self.conn.load_balancer.find_load_balancer.return_value = \ - loadbalancer_obj - res = self.oc.loadbalancer_get(lb_id) - self.conn.load_balancer.find_load_balancer.assert_called_once_with( - lb_id, False) - self.assertEqual(loadbalancer_obj, res) - - def test_loadbalancer_create(self): - vip_subnet_id = 'ID1' - lb_obj = mock.Mock() - - # All input parameters are provided - kwargs = { - 'vip_address': '192.168.0.100', - 'name': 'test-loadbalancer', - 'description': 'This is a loadbalancer', - 'admin_state_up': True - } - - self.conn.load_balancer.create_load_balancer.return_value = lb_obj - self.assertEqual(lb_obj, self.oc.loadbalancer_create(vip_subnet_id, - **kwargs)) - self.conn.load_balancer.create_load_balancer.assert_called_once_with( - vip_subnet_id=vip_subnet_id, **kwargs) - - # Use default input parameters - kwargs = { - 'admin_state_up': True - } - self.assertEqual(lb_obj, self.oc.loadbalancer_create(vip_subnet_id)) - self.conn.load_balancer.create_load_balancer.assert_called_with( - vip_subnet_id=vip_subnet_id, **kwargs) - - def test_loadbalancer_delete(self): - lb_id = 'ID1' - - self.oc.loadbalancer_delete(lb_id, ignore_missing=False) - self.conn.load_balancer.delete_load_balancer.assert_called_once_with( - lb_id, ignore_missing=False) - - self.oc.loadbalancer_delete(lb_id) - self.conn.load_balancer.delete_load_balancer.assert_called_with( - lb_id, ignore_missing=True) - - def test_listener_create(self): - loadbalancer_id = 'ID1' - protocol = 'HTTP' - protocol_port = 80 - listener_obj = mock.Mock() - - # All input parameters are provided - kwargs = { - 'connection_limit': 100, - 'admin_state_up': True, - 'name': 'test-listener', - 'description': 'This is a listener', - } - - self.conn.load_balancer.create_listener.return_value = listener_obj - self.assertEqual(listener_obj, self.oc.listener_create( - loadbalancer_id, protocol, protocol_port, **kwargs)) - self.conn.load_balancer.create_listener.assert_called_once_with( - loadbalancer_id=loadbalancer_id, protocol=protocol, - protocol_port=protocol_port, **kwargs) - - # Use default input parameters - kwargs = { - 'admin_state_up': True - } - self.assertEqual(listener_obj, self.oc.listener_create( - loadbalancer_id, protocol, protocol_port)) - self.conn.load_balancer.create_listener.assert_called_with( - loadbalancer_id=loadbalancer_id, protocol=protocol, - protocol_port=protocol_port, **kwargs) - - def test_listener_delete(self): - listener_id = 'ID1' - - self.oc.listener_delete(listener_id, ignore_missing=False) - self.conn.load_balancer.delete_listener.assert_called_once_with( - listener_id,
ignore_missing=False) - - self.oc.listener_delete(listener_id) - self.conn.load_balancer.delete_listener.assert_called_with( - listener_id, ignore_missing=True) - - def test_pool_create(self): - lb_algorithm = 'ROUND_ROBIN' - listener_id = 'ID1' - protocol = 'HTTP' - session_persistence = { - 'type': 'SOURCE_IP', - 'cookie_name': 'whatever', - } - session_persistence_expected = session_persistence.copy() - session_persistence_expected.pop('cookie_name', None) - pool_obj = mock.Mock() - - # All input parameters are provided - kwargs = { - 'admin_state_up': True, - 'name': 'test-pool', - 'description': 'This is a pool', - } - - self.conn.load_balancer.create_pool.return_value = pool_obj - self.assertEqual(pool_obj, self.oc.pool_create( - lb_algorithm, listener_id, protocol, session_persistence, - **kwargs)) - self.conn.load_balancer.create_pool.assert_called_once_with( - lb_algorithm=lb_algorithm, listener_id=listener_id, - protocol=protocol, - session_persistence=session_persistence_expected, - **kwargs) - - # Use default input parameters - kwargs = { - 'admin_state_up': True - } - self.assertEqual(pool_obj, self.oc.pool_create( - lb_algorithm, listener_id, protocol, session_persistence)) - self.conn.load_balancer.create_pool.assert_called_with( - lb_algorithm=lb_algorithm, listener_id=listener_id, - protocol=protocol, - session_persistence=session_persistence_expected, - **kwargs) - - def test_pool_create_cookie_removed(self): - lb_algorithm = 'ROUND_ROBIN' - listener_id = 'ID1' - protocol = 'HTTP' - pool_obj = mock.Mock() - - # All input parameters are provided - kwargs = { - 'admin_state_up': True, - 'name': 'test-pool', - 'description': 'This is a pool', - } - - self.conn.load_balancer.create_pool.return_value = pool_obj - - # Check type is APP_COOKIE - session_persistence_app_cookie = { - 'type': 'APP_COOKIE', - 'cookie_name': 'whatever', - } - self.assertEqual(pool_obj, self.oc.pool_create( - lb_algorithm, listener_id, protocol, - session_persistence_app_cookie, **kwargs)) - # cookie_name is not removed - self.conn.load_balancer.create_pool.assert_called_once_with( - lb_algorithm=lb_algorithm, listener_id=listener_id, - protocol=protocol, - session_persistence=session_persistence_app_cookie, - **kwargs) - - def test_pool_create_type_none(self): - lb_algorithm = 'ROUND_ROBIN' - listener_id = 'ID1' - protocol = 'HTTP' - session_persistence = { - 'type': 'NONE', - 'cookie_name': 'whatever', - } - session_persistence_expected = session_persistence.copy() - session_persistence_expected.pop('cookie_name', None) - pool_obj = mock.Mock() - - # All input parameters are provided - kwargs = { - 'admin_state_up': True, - 'name': 'test-pool', - 'description': 'This is a pool', - } - - self.conn.load_balancer.create_pool.return_value = pool_obj - - # when type is NONE, the expected session_persistence becomes None - session_persistence_expected = None - - self.assertEqual(pool_obj, self.oc.pool_create( - lb_algorithm, listener_id, protocol, - session_persistence, **kwargs)) - # the whole session_persistence dict is replaced with None - self.conn.load_balancer.create_pool.assert_called_once_with( - lb_algorithm=lb_algorithm, listener_id=listener_id, - protocol=protocol, - session_persistence=session_persistence_expected, - **kwargs) - - def test_pool_delete(self): - pool_id = 'ID1' - - self.oc.pool_delete(pool_id, ignore_missing=False) - self.conn.load_balancer.delete_pool.assert_called_once_with( - pool_id, ignore_missing=False) - - self.oc.pool_delete(pool_id) - self.conn.load_balancer.delete_pool.assert_called_with( - pool_id,
ignore_missing=True) - - def test_pool_member_create(self): - name = 'web-server-1' - pool_id = 'ID1' - address = '192.168.1.100' - protocol_port = 80 - subnet_id = 'ID2' - weight = 50 - member_obj = mock.Mock() - - # All input parameters are provided - kwargs = { - 'weight': weight, - 'admin_state_up': True, - } - - self.conn.load_balancer.create_member.return_value = member_obj - self.assertEqual(member_obj, self.oc.pool_member_create( - name, pool_id, address, protocol_port, subnet_id, **kwargs)) - self.conn.load_balancer.create_member.assert_called_once_with( - pool_id, name=name, address=address, protocol_port=protocol_port, - subnet_id=subnet_id, **kwargs) - - # Use default input parameters - kwargs = { - 'admin_state_up': True - } - self.assertEqual(member_obj, self.oc.pool_member_create( - name, pool_id, address, protocol_port, subnet_id)) - self.conn.load_balancer.create_member.assert_called_with( - pool_id, name=name, address=address, protocol_port=protocol_port, - subnet_id=subnet_id, **kwargs) - - def test_pool_member_delete(self): - pool_id = 'ID1' - member_id = 'ID2' - - self.oc.pool_member_delete(pool_id, member_id, ignore_missing=False) - self.conn.load_balancer.delete_member.assert_called_once_with( - member_id, pool_id, ignore_missing=False) - - self.oc.pool_member_delete(pool_id, member_id) - self.conn.load_balancer.delete_member.assert_called_with( - member_id, pool_id, ignore_missing=True) - - def test_healthmonitor_create(self): - hm_type = 'HTTP' - delay = 30 - timeout = 10 - max_retries = 5 - pool_id = 'ID1' - hm_obj = mock.Mock() - - # All input parameters are provided - kwargs = { - 'http_method': 'test-method', - 'admin_state_up': True, - 'url_path': '/test_page', - 'expected_codes': [200, 201, 202], - } - - self.conn.load_balancer.create_health_monitor.return_value = hm_obj - res = self.oc.healthmonitor_create(hm_type, delay, timeout, - max_retries, pool_id, **kwargs) - self.assertEqual(hm_obj, res) - self.conn.load_balancer.create_health_monitor.assert_called_once_with( - type=hm_type, delay=delay, timeout=timeout, - max_retries=max_retries, pool_id=pool_id, **kwargs) - - # Use default input parameters - res = self.oc.healthmonitor_create(hm_type, delay, timeout, - max_retries, pool_id, - admin_state_up=True) - self.assertEqual(hm_obj, res) - self.conn.load_balancer.create_health_monitor.assert_called_with( - type=hm_type, delay=delay, timeout=timeout, - max_retries=max_retries, pool_id=pool_id, - admin_state_up=True) - - # hm_type other than HTTP, then other params ignored - res = self.oc.healthmonitor_create('TCP', delay, timeout, - max_retries, pool_id, **kwargs) - self.assertEqual(hm_obj, res) - self.conn.load_balancer.create_health_monitor.assert_called_with( - type='TCP', delay=delay, timeout=timeout, - max_retries=max_retries, pool_id=pool_id, - admin_state_up=True) - - def test_healthmonitor_delete(self): - healthmonitor_id = 'ID1' - - self.oc.healthmonitor_delete(healthmonitor_id, ignore_missing=False) - self.conn.load_balancer.delete_health_monitor.assert_called_once_with( - healthmonitor_id, ignore_missing=False) - - self.oc.healthmonitor_delete(healthmonitor_id) - self.conn.load_balancer.delete_health_monitor.assert_called_with( - healthmonitor_id, ignore_missing=True) diff --git a/senlin/tests/unit/drivers/test_sdk.py b/senlin/tests/unit/drivers/test_sdk.py deleted file mode 100644 index 196adea83..000000000 --- a/senlin/tests/unit/drivers/test_sdk.py +++ /dev/null @@ -1,293 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); 
you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import types -from unittest import mock - -import openstack -from oslo_serialization import jsonutils -from requests import exceptions as req_exc - -from senlin.common import exception as senlin_exc -from senlin.drivers import sdk -from senlin.tests.unit.common import base -from senlin import version - - -class OpenStackSDKTest(base.SenlinTestCase): - - def setUp(self): - super(OpenStackSDKTest, self).setUp() - self.app_version = version.version_info.version_string() - - def test_parse_exception_http_exception_with_details(self): - details = jsonutils.dumps({ - 'error': { - 'code': 404, - 'message': 'Resource BAR is not found.' - } - }) - raw = sdk.exc.ResourceNotFound(message='A message', details=details, - response=None, http_status=404) - ex = self.assertRaises(senlin_exc.InternalError, - sdk.parse_exception, raw) - - self.assertEqual(404, ex.code) - self.assertEqual('Resource BAR is not found.', str(ex)) - # key name is not 'error' case - details = jsonutils.dumps({ - 'forbidden': { - 'code': 403, - 'message': 'Quota exceeded for instances.' - } - }) - raw = sdk.exc.ResourceNotFound(message='A message', details=details, - http_status=403) - ex = self.assertRaises(senlin_exc.InternalError, - sdk.parse_exception, raw) - - self.assertEqual(403, ex.code) - self.assertEqual('Quota exceeded for instances.', str(ex)) - - def test_parse_exception_http_exception_no_details(self): - resp = mock.Mock(headers={'x-openstack-request-id': 'FAKE_ID'}) - resp.json.return_value = {} - resp.status_code = 404 - - raw = sdk.exc.ResourceNotFound(message='Error', details=None, - response=resp, http_status=404) - ex = self.assertRaises(senlin_exc.InternalError, - sdk.parse_exception, raw) - - self.assertEqual(404, ex.code) - self.assertEqual('Error', str(ex)) - - def test_parse_exception_http_exception_no_details_no_response(self): - details = "An error message" - - raw = sdk.exc.ResourceNotFound(message='A message.', details=details, - http_status=404) - raw.details = None - raw.response = None - ex = self.assertRaises(senlin_exc.InternalError, - sdk.parse_exception, raw) - self.assertEqual(404, ex.code) - self.assertEqual('A message.', str(ex)) - - def test_parse_exception_http_exception_code_displaced(self): - details = jsonutils.dumps({ - 'code': 400, - 'error': { - 'message': 'Resource BAR is in error state.' 
- } - }) - - raw = sdk.exc.HttpException( - message='A message.', details=details, http_status=400) - ex = self.assertRaises(senlin_exc.InternalError, - sdk.parse_exception, raw) - - self.assertEqual(400, ex.code) - self.assertEqual('Resource BAR is in error state.', str(ex)) - - def test_parse_exception_sdk_exception(self): - raw = sdk.exc.InvalidResponse('INVALID') - - ex = self.assertRaises(senlin_exc.InternalError, - sdk.parse_exception, raw) - - self.assertEqual(500, ex.code) - self.assertEqual('InvalidResponse', str(ex)) - - def test_parse_exception_request_exception(self): - raw = req_exc.HTTPError(401, 'ERROR') - - ex = self.assertRaises(senlin_exc.InternalError, - sdk.parse_exception, raw) - - self.assertEqual(401, ex.code) - self.assertEqual('[Errno 401] ERROR', ex.message) - - def test_parse_exception_other_exceptions(self): - raw = Exception('Unknown Error') - - ex = self.assertRaises(senlin_exc.InternalError, - sdk.parse_exception, raw) - - self.assertEqual(500, ex.code) - self.assertEqual('Unknown Error', str(ex)) - - def test_translate_exception_wrapper(self): - @sdk.translate_exception - def test_func(driver): - return driver.__name__ - - res = sdk.translate_exception(test_func) - self.assertEqual(types.FunctionType, type(res)) - - def test_translate_exception_with_exception(self): - @sdk.translate_exception - def test_func(driver): - raise (Exception('test exception')) - - error = senlin_exc.InternalError(code=500, message='BOOM') - self.patchobject(sdk, 'parse_exception', side_effect=error) - ex = self.assertRaises(senlin_exc.InternalError, - test_func, mock.Mock()) - - self.assertEqual(500, ex.code) - self.assertEqual('BOOM', ex.message) - - @mock.patch.object(openstack, 'connect') - def test_create_connection_token(self, mock_conn): - x_conn = mock.Mock() - mock_session_client = mock.Mock() - x_conn.config.get_session_client.return_value = mock_session_client - mock_conn.return_value = x_conn - mock_session_client.get_endpoint.return_value = 'https://FAKE_URL' - - res = sdk.create_connection({'token': 'TOKEN', 'foo': 'bar'}) - - self.assertEqual(x_conn, res) - calls = [ - mock.call( - load_envvars=False, - load_yaml_config=False, - insecure=False, - cafile=None, - cert=None, - key=None, - app_name=sdk.USER_AGENT, - app_version=self.app_version, - auth_url='', - username='senlin', - password='', - project_name='service', - user_domain_name='Default', - project_domain_name='Default', - verify=True, - interface='public' - ), - mock.call().config.get_session_client( - service_type='identity', - region_name=None, - allow_version_hack=True - ), - mock.call().config.get_session_client().get_endpoint( - region_name=None, interface='public'), - mock.call(load_envvars=False, - load_yaml_config=False, - insecure=False, - cafile=None, - cert=None, - key=None, - app_name=sdk.USER_AGENT, - app_version=self.app_version, - token='TOKEN', - foo='bar', - region_name=None, - identity_api_version='3', - messaging_api_version='2', - auth_type='admin_token', - endpoint='https://FAKE_URL'), - ] - mock_conn.assert_has_calls(calls) - - @mock.patch.object(openstack, 'connect') - def test_create_connection_password(self, mock_conn): - x_conn = mock.Mock() - mock_conn.return_value = x_conn - - res = sdk.create_connection({'user_id': '123', 'password': 'abc', - 'foo': 'bar'}) - - self.assertEqual(x_conn, res) - mock_conn.assert_called_once_with( - load_envvars=False, - load_yaml_config=False, - insecure=False, - cafile=None, - cert=None, - key=None, - app_name=sdk.USER_AGENT, - 
app_version=self.app_version, - identity_api_version='3', - messaging_api_version='2', - region_name=None, - user_id='123', - password='abc', - foo='bar') - - @mock.patch.object(openstack, 'connect') - def test_create_connection_with_region(self, mock_conn): - x_conn = mock.Mock() - mock_conn.return_value = x_conn - - res = sdk.create_connection({'region_name': 'REGION_ONE'}) - - self.assertEqual(x_conn, res) - mock_conn.assert_called_once_with( - load_envvars=False, - load_yaml_config=False, - insecure=False, - cafile=None, - cert=None, - key=None, - app_name=sdk.USER_AGENT, - app_version=self.app_version, - identity_api_version='3', - messaging_api_version='2', - region_name='REGION_ONE') - - @mock.patch.object(openstack, 'connect') - @mock.patch.object(sdk, 'parse_exception') - def test_create_connection_with_exception(self, mock_parse, mock_conn): - ex_raw = Exception('Whatever') - mock_conn.side_effect = ex_raw - mock_parse.side_effect = senlin_exc.InternalError(code=123, - message='BOOM') - - ex = self.assertRaises(senlin_exc.InternalError, - sdk.create_connection) - - mock_conn.assert_called_once_with( - load_envvars=False, - load_yaml_config=False, - insecure=False, - cafile=None, - cert=None, - key=None, - app_name=sdk.USER_AGENT, - app_version=self.app_version, - identity_api_version='3', - messaging_api_version='2', - region_name=None) - mock_parse.assert_called_once_with(ex_raw) - self.assertEqual(123, ex.code) - self.assertEqual('BOOM', ex.message) - - @mock.patch.object(sdk, 'create_connection') - def test_authenticate(self, mock_conn): - x_conn = mock_conn.return_value - x_conn.session.get_token.return_value = 'TOKEN' - x_conn.session.get_user_id.return_value = 'test-user-id' - x_conn.session.get_project_id.return_value = 'test-project-id' - access_info = { - 'token': 'TOKEN', - 'user_id': 'test-user-id', - 'project_id': 'test-project-id' - } - - res = sdk.authenticate(foo='bar') - - self.assertEqual(access_info, res) - mock_conn.assert_called_once_with({'foo': 'bar'}) diff --git a/senlin/tests/unit/drivers/test_zaqar_v2.py b/senlin/tests/unit/drivers/test_zaqar_v2.py deleted file mode 100644 index cecbd22ea..000000000 --- a/senlin/tests/unit/drivers/test_zaqar_v2.py +++ /dev/null @@ -1,146 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
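The zaqar_v2 tests below use the same fixture idea as the sdk tests above: the connection factory is patched so that constructing a client yields a mock connection, after which each test asserts on the forwarded proxy call. A sketch of that setUp pattern under stated assumptions; create_connection and QueueClient here are simplified stand-ins rather than Senlin's real code.

import unittest
from unittest import mock


def create_connection(params=None, service_type=None):
    """Stand-in for a real connection factory (assumed for the sketch)."""
    raise NotImplementedError


class QueueClient(object):
    def __init__(self, params):
        self.conn = create_connection(params, service_type='messaging')

    def queue_create(self, **attrs):
        return self.conn.message.create_queue(**attrs)


class TestQueueClient(unittest.TestCase):

    def setUp(self):
        super(TestQueueClient, self).setUp()
        self.mock_conn = mock.Mock()
        patcher = mock.patch(__name__ + '.create_connection',
                             return_value=self.mock_conn)
        self.mock_create = patcher.start()
        self.addCleanup(patcher.stop)

    def test_queue_create(self):
        zc = QueueClient({'k': 'v'})
        self.mock_create.assert_called_once_with(
            {'k': 'v'}, service_type='messaging')
        zc.queue_create(name='foo')
        self.mock_conn.message.create_queue.assert_called_once_with(
            name='foo')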
- -from unittest import mock - -from openstack import exceptions as sdk_exc - -from senlin.drivers.os import zaqar_v2 -from senlin.drivers import sdk -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestZaqarV2(base.SenlinTestCase): - - def setUp(self): - super(TestZaqarV2, self).setUp() - - self.ctx = utils.dummy_context() - self.conn_params = self.ctx.to_dict() - self.mock_conn = mock.Mock() - self.mock_create = self.patchobject( - sdk, 'create_connection', - return_value=self.mock_conn) - self.message = self.mock_conn.message - - def test_init(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - self.mock_create.assert_called_once_with(self.conn_params, - service_type='messaging') - self.assertEqual(self.mock_conn, zc.conn) - - def test_queue_create(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - zc.queue_create(name='foo') - self.message.create_queue.assert_called_once_with(name='foo') - - def test_queue_exists(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - res = zc.queue_exists('foo') - self.message.get_queue.assert_called_once_with('foo') - self.assertTrue(res) - - def test_queue_exists_false(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - self.message.get_queue = mock.Mock() - self.message.get_queue.side_effect = sdk_exc.ResourceNotFound - - res = zc.queue_exists('foo') - self.message.get_queue.assert_called_once_with('foo') - self.assertFalse(res) - - def test_queue_delete(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - zc.queue_delete('foo', True) - self.message.delete_queue.assert_called_once_with('foo', True) - self.message.delete_queue.reset_mock() - - zc.queue_delete('foo', False) - self.message.delete_queue.assert_called_once_with('foo', False) - self.message.delete_queue.reset_mock() - - zc.queue_delete('foo') - self.message.delete_queue.assert_called_once_with('foo', True) - - def test_subscription_create(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - attrs = {'k1': 'v1'} - zc.subscription_create('foo', **attrs) - self.message.create_subscription.assert_called_once_with( - 'foo', k1='v1') - - def test_subscription_delete(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - zc.subscription_delete('foo', 'SUBSCRIPTION_ID', True) - self.message.delete_subscription.assert_called_once_with( - 'foo', 'SUBSCRIPTION_ID', True) - self.message.delete_subscription.reset_mock() - - zc.subscription_delete('foo', 'SUBSCRIPTION_ID', False) - self.message.delete_subscription.assert_called_once_with( - 'foo', 'SUBSCRIPTION_ID', False) - self.message.delete_subscription.reset_mock() - - zc.subscription_delete('foo', 'SUBSCRIPTION_ID') - self.message.delete_subscription.assert_called_once_with( - 'foo', 'SUBSCRIPTION_ID', True) - - def test_claim_create(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - attrs = {'k1': 'v1'} - - zc.claim_create('foo', **attrs) - - self.message.create_claim.assert_called_once_with('foo', k1='v1') - - def test_claim_delete(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - zc.claim_delete('foo', 'CLAIM_ID', True) - self.message.delete_claim.assert_called_once_with( - 'foo', 'CLAIM_ID', True) - self.message.delete_claim.reset_mock() - - zc.claim_delete('foo', 'CLAIM_ID', False) - self.message.delete_claim.assert_called_once_with( - 'foo', 'CLAIM_ID', False) - self.message.delete_claim.reset_mock() - - zc.claim_delete('foo', 'CLAIM_ID') - self.message.delete_claim.assert_called_once_with( - 'foo', 'CLAIM_ID', True) - - def test_message_delete(self): - zc = 
zaqar_v2.ZaqarClient(self.conn_params) - zc.message_delete('foo', 'MESSAGE_ID', None, True) - self.message.delete_message.assert_called_once_with( - 'foo', 'MESSAGE_ID', None, True) - self.message.delete_message.reset_mock() - - zc.message_delete('foo', 'MESSAGE_ID', None, False) - self.message.delete_message.assert_called_once_with( - 'foo', 'MESSAGE_ID', None, False) - self.message.delete_message.reset_mock() - - zc.message_delete('foo', 'MESSAGE_ID') - self.message.delete_message.assert_called_once_with( - 'foo', 'MESSAGE_ID', None, True) - self.message.delete_message.reset_mock() - - zc.message_delete('foo', 'MESSAGE_ID', 'CLAIM_ID') - self.message.delete_message.assert_called_once_with( - 'foo', 'MESSAGE_ID', 'CLAIM_ID', True) - - def test_message_post(self): - zc = zaqar_v2.ZaqarClient(self.conn_params) - zc.message_post('foo', 'MESSAGE') - self.message.post_message.assert_called_once_with('foo', 'MESSAGE') diff --git a/senlin/tests/unit/engine/__init__.py b/senlin/tests/unit/engine/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/engine/actions/__init__.py b/senlin/tests/unit/engine/actions/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/engine/actions/test_action_base.py b/senlin/tests/unit/engine/actions/test_action_base.py deleted file mode 100644 index b32449360..000000000 --- a/senlin/tests/unit/engine/actions/test_action_base.py +++ /dev/null @@ -1,1202 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
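Among other things, the action tests below verify that control signals are persisted only from statuses where they make sense: CANCEL from INIT/WAITING/READY/RUNNING, SUSPEND only from RUNNING, RESUME only from SUSPENDED. A compact sketch of such state-guarded signal dispatch, assuming a simplified Action with a mockable persistence layer (this is not Senlin's actual class).

import unittest
from unittest import mock


class Action(object):
    """Illustrative state-guarded signal dispatch."""

    INIT, WAITING, READY, RUNNING = 'INIT', 'WAITING', 'READY', 'RUNNING'
    SUSPENDED, SUCCEEDED, CANCELLED, FAILED = (
        'SUSPENDED', 'SUCCEEDED', 'CANCELLED', 'FAILED')
    SIG_CANCEL, SIG_SUSPEND, SIG_RESUME = 'CANCEL', 'SUSPEND', 'RESUME'

    # Statuses in which each signal is accepted (assumed mapping).
    _ALLOWED = {
        SIG_CANCEL: (INIT, WAITING, READY, RUNNING),
        SIG_SUSPEND: (RUNNING,),
        SIG_RESUME: (SUSPENDED,),
    }

    def __init__(self, store):
        self.status = self.INIT
        self._store = store  # persistence layer, mocked in tests

    def signal(self, cmd):
        if cmd not in self._ALLOWED:
            return  # unknown command: silently ignored
        if self.status not in self._ALLOWED[cmd]:
            return  # signal not valid in the current status: ignored
        self._store.signal(self, cmd)


class TestSignal(unittest.TestCase):

    def test_cancel_only_from_active_statuses(self):
        store = mock.Mock()
        a = Action(store)
        a.status = Action.SUCCEEDED
        a.signal(Action.SIG_CANCEL)
        store.signal.assert_not_called()

        a.status = Action.RUNNING
        a.signal(Action.SIG_CANCEL)
        store.signal.assert_called_once_with(a, Action.SIG_CANCEL)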
- -import copy -import eventlet -from unittest import mock - -from oslo_config import cfg -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.common import exception -from senlin.common import utils as common_utils -from senlin.engine.actions import base as ab -from senlin.engine import cluster as cluster_mod -from senlin.engine import dispatcher -from senlin.engine import environment -from senlin.engine import event as EVENT -from senlin.engine import node as node_mod -from senlin.objects import action as ao -from senlin.objects import cluster_lock as cl -from senlin.objects import cluster_policy as cpo -from senlin.objects import dependency as dobj -from senlin.objects import node_lock as nl -from senlin.policies import base as policy_mod -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils -from senlin.tests.unit import fakes - -CLUSTER_ID = 'e1cfd82b-dc95-46ad-86e8-37864d7be1cd' -OBJID = '571fffb8-f41c-4cbc-945c-cb2937d76f19' -OWNER_ID = 'c7114713-ee68-409d-ba5d-0560a72a386c' -ACTION_ID = '4c2cead2-fd74-418a-9d12-bd2d9bd7a812' -USER_ID = '3c4d64baadcd437d8dd49054899e73dd' -PROJECT_ID = 'cf7a6ae28dde4f46aa8fe55d318a608f' -CHILD_IDS = ['8500ae8f-e632-4e8b-8206-552873cc2c3a', - '67c0eba9-514f-4659-9deb-99868873dfd6'] - - -class DummyAction(ab.Action): - - def __init__(self, target, action, context, **kwargs): - super(DummyAction, self).__init__(target, action, context, **kwargs) - - -class ActionBaseTest(base.SenlinTestCase): - - def setUp(self): - super(ActionBaseTest, self).setUp() - - self.ctx = utils.dummy_context(project=PROJECT_ID, user_id=USER_ID) - self.action_values = { - 'name': 'FAKE_NAME', - 'cluster_id': 'FAKE_CLUSTER_ID', - 'cause': 'FAKE_CAUSE', - 'owner': OWNER_ID, - 'interval': 60, - 'start_time': 0, - 'end_time': 0, - 'timeout': 120, - 'status': 'FAKE_STATUS', - 'status_reason': 'FAKE_STATUS_REASON', - 'inputs': {'param': 'value'}, - 'outputs': {'key': 'output_value'}, - 'created_at': timeutils.utcnow(True), - 'updated_at': None, - 'data': {'data_key': 'data_value'}, - } - - def _verify_new_action(self, obj, target, action): - self.assertIsNone(obj.id) - self.assertEqual('', obj.name) - self.assertEqual('', obj.cluster_id) - self.assertEqual(target, obj.target) - self.assertEqual(action, obj.action) - self.assertEqual('', obj.cause) - self.assertIsNone(obj.owner) - self.assertEqual(-1, obj.interval) - self.assertIsNone(obj.start_time) - self.assertIsNone(obj.end_time) - self.assertEqual(cfg.CONF.default_action_timeout, obj.timeout) - self.assertEqual('INIT', obj.status) - self.assertEqual('', obj.status_reason) - self.assertEqual({}, obj.inputs) - self.assertEqual({}, obj.outputs) - self.assertIsNone(obj.created_at) - self.assertIsNone(obj.updated_at) - self.assertEqual({}, obj.data) - - def _create_cp_binding(self, cluster_id, policy_id): - return cpo.ClusterPolicy(cluster_id=cluster_id, policy_id=policy_id, - enabled=True, id=uuidutils.generate_uuid(), - last_op=None) - - @mock.patch.object(cluster_mod.Cluster, 'load') - def test_action_new_cluster(self, mock_load): - fake_cluster = mock.Mock(timeout=cfg.CONF.default_action_timeout) - mock_load.return_value = fake_cluster - obj = ab.Action(OBJID, 'CLUSTER_CREATE', self.ctx) - self._verify_new_action(obj, OBJID, 'CLUSTER_CREATE') - - @mock.patch.object(node_mod.Node, 'load') - def test_action_new_node(self, mock_load): - obj = ab.Action(OBJID, 'NODE_CREATE', self.ctx) - self._verify_new_action(obj, OBJID, 'NODE_CREATE') - 
- def test_action_init_with_values(self): - values = copy.deepcopy(self.action_values) - values['id'] = 'FAKE_ID' - values['created_at'] = 'FAKE_CREATED_TIME' - values['updated_at'] = 'FAKE_UPDATED_TIME' - - obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - - self.assertEqual('FAKE_ID', obj.id) - self.assertEqual('FAKE_NAME', obj.name) - self.assertEqual('FAKE_CLUSTER_ID', obj.cluster_id) - self.assertEqual(OBJID, obj.target) - self.assertEqual('FAKE_CAUSE', obj.cause) - self.assertEqual(OWNER_ID, obj.owner) - self.assertEqual(60, obj.interval) - self.assertEqual(0, obj.start_time) - self.assertEqual(0, obj.end_time) - self.assertEqual(120, obj.timeout) - self.assertEqual('FAKE_STATUS', obj.status) - self.assertEqual('FAKE_STATUS_REASON', obj.status_reason) - self.assertEqual({'param': 'value'}, obj.inputs) - self.assertEqual({'key': 'output_value'}, obj.outputs) - self.assertEqual('FAKE_CREATED_TIME', obj.created_at) - self.assertEqual('FAKE_UPDATED_TIME', obj.updated_at) - self.assertEqual({'data_key': 'data_value'}, obj.data) - - def test_action_store_for_create(self): - values = copy.deepcopy(self.action_values) - obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - - self.assertEqual(common_utils.isotime(values['created_at']), - common_utils.isotime(obj.created_at)) - self.assertIsNone(obj.updated_at) - - # store for creation - res = obj.store(self.ctx) - self.assertIsNotNone(res) - self.assertEqual(obj.id, res) - self.assertIsNotNone(obj.created_at) - self.assertIsNone(obj.updated_at) - - def test_action_store_for_update(self): - values = copy.deepcopy(self.action_values) - - obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - obj_id = obj.store(self.ctx) - self.assertIsNotNone(obj_id) - self.assertIsNotNone(obj.created_at) - self.assertIsNone(obj.updated_at) - - # store for update - res = obj.store(self.ctx) - self.assertIsNotNone(res) - self.assertEqual(obj_id, res) - self.assertEqual(obj.id, res) - self.assertIsNotNone(obj.created_at) - self.assertIsNotNone(obj.updated_at) - - def test_from_db_record(self): - values = copy.deepcopy(self.action_values) - obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - obj.store(self.ctx) - - record = ao.Action.get(self.ctx, obj.id) - - action_obj = ab.Action._from_object(record) - self.assertIsInstance(action_obj, ab.Action) - self.assertEqual(obj.id, action_obj.id) - self.assertEqual(obj.cluster_id, action_obj.cluster_id) - self.assertEqual(obj.action, action_obj.action) - self.assertEqual(obj.name, action_obj.name) - self.assertEqual(obj.target, action_obj.target) - self.assertEqual(obj.cause, action_obj.cause) - self.assertEqual(obj.owner, action_obj.owner) - self.assertEqual(obj.interval, action_obj.interval) - self.assertEqual(obj.start_time, action_obj.start_time) - self.assertEqual(obj.end_time, action_obj.end_time) - self.assertEqual(obj.timeout, action_obj.timeout) - self.assertEqual(obj.status, action_obj.status) - self.assertEqual(obj.status_reason, action_obj.status_reason) - self.assertEqual(obj.inputs, action_obj.inputs) - self.assertEqual(obj.outputs, action_obj.outputs) - self.assertEqual(common_utils.isotime(obj.created_at), - common_utils.isotime(action_obj.created_at)) - self.assertEqual(obj.updated_at, action_obj.updated_at) - self.assertEqual(obj.data, action_obj.data) - self.assertEqual(obj.user, action_obj.user) - self.assertEqual(obj.project, action_obj.project) - self.assertEqual(obj.domain, action_obj.domain) - - def test_from_db_record_with_empty_fields(self): - values =
copy.deepcopy(self.action_values) - del values['inputs'] - del values['outputs'] - obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - obj.store(self.ctx) - record = ao.Action.get(self.ctx, obj.id) - action_obj = ab.Action._from_object(record) - self.assertEqual({}, action_obj.inputs) - self.assertEqual({}, action_obj.outputs) - - def test_load(self): - values = copy.deepcopy(self.action_values) - obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - obj.store(self.ctx) - - result = ab.Action.load(self.ctx, obj.id, None) - # no need to do a thorough test here - self.assertEqual(obj.id, result.id) - self.assertEqual(obj.action, result.action) - - db_action = ao.Action.get(self.ctx, obj.id) - result = ab.Action.load(self.ctx, None, db_action) - # no need to do a thorough test here - self.assertEqual(obj.id, result.id) - self.assertEqual(obj.action, result.action) - - def test_load_not_found(self): - # not found due to bad identity - ex = self.assertRaises(exception.ResourceNotFound, - ab.Action.load, - self.ctx, 'non-existent', None) - self.assertEqual("The action 'non-existent' could not be " - "found.", str(ex)) - - # not found due to no object - self.patchobject(ao.Action, 'get', return_value=None) - ex = self.assertRaises(exception.ResourceNotFound, - ab.Action.load, - self.ctx, 'whatever', None) - self.assertEqual("The action 'whatever' could not be found.", - str(ex)) - - @mock.patch.object(ab.Action, 'store') - def test_action_create(self, mock_store): - mock_store.return_value = 'FAKE_ID' - - result = ab.Action.create(self.ctx, OBJID, 'CLUSTER_DANCE', - name='test') - - self.assertEqual('FAKE_ID', result) - mock_store.assert_called_once_with(self.ctx) - - @mock.patch.object(ab.Action, 'store') - @mock.patch.object(ao.Action, 'get_all_active_by_target') - @mock.patch.object(cl.ClusterLock, 'is_locked') - def test_action_create_lock_cluster_false(self, mock_lock, - mock_active, mock_store): - mock_store.return_value = 'FAKE_ID' - mock_active.return_value = None - mock_lock.return_value = False - - result = ab.Action.create(self.ctx, OBJID, 'CLUSTER_CREATE', - name='test') - - self.assertEqual('FAKE_ID', result) - mock_store.assert_called_once_with(self.ctx) - mock_active.assert_called_once_with(mock.ANY, OBJID) - - @mock.patch.object(ab.Action, 'store') - @mock.patch.object(ao.Action, 'get_all_active_by_target') - @mock.patch.object(cl.ClusterLock, 'is_locked') - def test_action_create_lock_cluster_true(self, mock_lock, - mock_active, mock_store): - mock_store.return_value = 'FAKE_ID' - mock_active.return_value = None - mock_lock.return_value = True - - error_message = ( - 'CLUSTER_CREATE for cluster \'{}\' cannot be completed because ' - 'it is already locked.').format(OBJID) - with self.assertRaisesRegex(exception.ResourceIsLocked, - error_message): - ab.Action.create(self.ctx, OBJID, 'CLUSTER_CREATE', name='test') - - mock_store.assert_not_called() - mock_active.assert_not_called() - - @mock.patch.object(ab.Action, 'store') - @mock.patch.object(ao.Action, 'get_all_active_by_target') - @mock.patch.object(nl.NodeLock, 'is_locked') - def test_action_create_lock_node_false(self, mock_lock, - mock_active, mock_store): - mock_store.return_value = 'FAKE_ID' - mock_active.return_value = None - mock_lock.return_value = False - - result = ab.Action.create(self.ctx, OBJID, 'NODE_CREATE', - name='test') - - self.assertEqual('FAKE_ID', result) - mock_store.assert_called_once_with(self.ctx) - mock_active.assert_called_once_with(mock.ANY, OBJID) - - @mock.patch.object(ab.Action, 
'store') - @mock.patch.object(ao.Action, 'get_all_active_by_target') - @mock.patch.object(cl.ClusterLock, 'is_locked') - def test_action_create_lock_cluster_true_delete(self, mock_lock, - mock_active, mock_store): - mock_store.return_value = 'FAKE_ID' - mock_active.return_value = None - mock_lock.return_value = True - - result = ab.Action.create(self.ctx, OBJID, 'CLUSTER_DELETE', - name='test') - - self.assertEqual('FAKE_ID', result) - mock_store.assert_called_once_with(self.ctx) - mock_active.assert_called_once_with(mock.ANY, OBJID) - - @mock.patch.object(ab.Action, 'store') - @mock.patch.object(ao.Action, 'get_all_active_by_target') - @mock.patch.object(nl.NodeLock, 'is_locked') - def test_action_create_lock_node_true(self, mock_lock, mock_active, - mock_store): - mock_store.return_value = 'FAKE_ID' - mock_active.return_value = None - mock_lock.return_value = True - - error_message = ( - 'NODE_CREATE for node \'{}\' cannot be completed because ' - 'it is already locked.').format(OBJID) - with self.assertRaisesRegex(exception.ResourceIsLocked, - error_message): - ab.Action.create(self.ctx, OBJID, 'NODE_CREATE', name='test') - - mock_store.assert_not_called() - mock_active.assert_not_called() - - @mock.patch.object(ab.Action, 'store') - @mock.patch.object(ao.Action, 'get_all_active_by_target') - @mock.patch.object(cl.ClusterLock, 'is_locked') - def test_action_create_conflict(self, mock_lock, mock_active, mock_store): - mock_store.return_value = 'FAKE_ID' - uuid1 = 'ce982cd5-26da-4e2c-84e5-be8f720b7478' - uuid2 = 'ce982cd5-26da-4e2c-84e5-be8f720b7479' - mock_active.return_value = [ao.Action(id=uuid1), ao.Action(id=uuid2)] - mock_lock.return_value = False - - error_message = ( - 'The NODE_CREATE action for target {} conflicts with the following' - ' action\(s\): {},{}').format(OBJID, uuid1, uuid2) - with self.assertRaisesRegex(exception.ActionConflict, - error_message): - ab.Action.create(self.ctx, OBJID, 'NODE_CREATE', name='test') - - mock_store.assert_not_called() - mock_active.assert_called_once_with(mock.ANY, OBJID) - - @mock.patch.object(ab.Action, 'store') - @mock.patch.object(ao.Action, 'get_all_active_by_target') - @mock.patch.object(cl.ClusterLock, 'is_locked') - def test_action_create_delete_no_conflict(self, mock_lock, mock_active, - mock_store): - mock_store.return_value = 'FAKE_ID' - uuid1 = 'ce982cd5-26da-4e2c-84e5-be8f720b7478' - uuid2 = 'ce982cd5-26da-4e2c-84e5-be8f720b7479' - mock_active.return_value = [ - ao.Action(id=uuid1, action='NODE_DELETE'), - ao.Action(id=uuid2, action='NODE_DELETE') - ] - mock_lock.return_value = True - - result = ab.Action.create(self.ctx, OBJID, 'CLUSTER_DELETE', - name='test') - - self.assertEqual('FAKE_ID', result) - mock_store.assert_called_once_with(self.ctx) - mock_active.assert_called_once_with(mock.ANY, OBJID) - - @mock.patch.object(ab.Action, 'store') - @mock.patch.object(ao.Action, 'get_all_active_by_target') - @mock.patch.object(cl.ClusterLock, 'is_locked') - def test_action_create_node_operation_no_conflict(self, mock_lock, - mock_active, mock_store): - mock_store.return_value = 'FAKE_ID' - uuid1 = 'ce982cd5-26da-4e2c-84e5-be8f720b7478' - uuid2 = 'ce982cd5-26da-4e2c-84e5-be8f720b7479' - mock_active.return_value = [ - ao.Action(id=uuid1, action='NODE_DELETE'), - ao.Action(id=uuid2, action='NODE_DELETE') - ] - mock_lock.return_value = True - - result = ab.Action.create(self.ctx, OBJID, 'NODE_OPERATION', - name='test') - - self.assertEqual('FAKE_ID', result) - mock_store.assert_called_once_with(self.ctx) - 
mock_active.assert_called_once_with(mock.ANY, OBJID) - - @mock.patch.object(timeutils, 'is_older_than') - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - @mock.patch.object(policy_mod.Policy, 'load') - @mock.patch.object(ab.Action, 'store') - def test_action_create_scaling_cooldown_in_progress(self, mock_store, - mock_load, - mock_load_all, - mock_time_util): - cluster_id = CLUSTER_ID - # Note: policy is mocked - policy_id = uuidutils.generate_uuid() - policy = mock.Mock(id=policy_id, - TARGET=[('AFTER', 'CLUSTER_SCALE_OUT')], - event='CLUSTER_SCALE_OUT', - cooldown=240) - pb = self._create_cp_binding(cluster_id, policy.id) - pb.last_op = timeutils.utcnow(True) - mock_load_all.return_value = [pb] - mock_load.return_value = policy - mock_time_util.return_value = False - self.assertRaises(exception.ActionCooldown, ab.Action.create, self.ctx, - cluster_id, 'CLUSTER_SCALE_OUT') - self.assertEqual(0, mock_store.call_count) - - @mock.patch.object(ao.Action, 'action_list_active_scaling') - @mock.patch.object(ab.Action, 'store') - def test_action_create_scaling_conflict(self, mock_store, - mock_list_active): - cluster_id = CLUSTER_ID - - mock_action = mock.Mock() - mock_action.to_dict.return_value = {'id': 'fake_action_id'} - mock_list_active.return_value = [mock_action] - self.assertRaises(exception.ActionConflict, ab.Action.create, self.ctx, - cluster_id, 'CLUSTER_SCALE_IN') - self.assertEqual(0, mock_store.call_count) - - def test_action_delete(self): - result = ab.Action.delete(self.ctx, 'non-existent') - self.assertIsNone(result) - - values = copy.deepcopy(self.action_values) - action1 = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - action1.store(self.ctx) - - result = ab.Action.delete(self.ctx, action1.id) - self.assertIsNone(result) - - @mock.patch.object(ao.Action, 'delete') - def test_action_delete_db_call(self, mock_call): - # test db api call - ab.Action.delete(self.ctx, 'FAKE_ID') - mock_call.assert_called_once_with(self.ctx, 'FAKE_ID') - - @mock.patch.object(ao.Action, 'signal') - def test_action_signal_bad_command(self, mock_call): - values = copy.deepcopy(self.action_values) - action1 = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - action1.store(self.ctx) - - result = action1.signal('BOGUS') - self.assertIsNone(result) - self.assertEqual(0, mock_call.call_count) - - @mock.patch.object(ao.Action, 'signal') - def test_action_signal_cancel(self, mock_call): - values = copy.deepcopy(self.action_values) - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values) - action.store(self.ctx) - - expected = [action.INIT, action.WAITING, action.READY, action.RUNNING] - for status in expected: - action.status = status - result = action.signal(action.SIG_CANCEL) - self.assertIsNone(result) - self.assertEqual(1, mock_call.call_count) - mock_call.reset_mock() - - invalid = [action.SUSPENDED, action.SUCCEEDED, action.CANCELLED, - action.FAILED] - for status in invalid: - action.status = status - result = action.signal(action.SIG_CANCEL) - self.assertIsNone(result) - self.assertEqual(0, mock_call.call_count) - mock_call.reset_mock() - - @mock.patch.object(ao.Action, 'signal') - def test_action_signal_suspend(self, mock_call): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - - expected = [action.RUNNING] - for status in expected: - action.status = status - result = action.signal(action.SIG_SUSPEND) - self.assertIsNone(result) - self.assertEqual(1, mock_call.call_count) - mock_call.reset_mock() - - invalid = [action.INIT, action.WAITING, action.READY, 
action.SUSPENDED, - action.SUCCEEDED, action.CANCELLED, action.FAILED] - for status in invalid: - action.status = status - result = action.signal(action.SIG_SUSPEND) - self.assertIsNone(result) - self.assertEqual(0, mock_call.call_count) - mock_call.reset_mock() - - @mock.patch.object(ao.Action, 'signal') - def test_action_signal_resume(self, mock_call): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - - expected = [action.SUSPENDED] - for status in expected: - action.status = status - result = action.signal(action.SIG_RESUME) - self.assertIsNone(result) - self.assertEqual(1, mock_call.call_count) - mock_call.reset_mock() - - invalid = [action.INIT, action.WAITING, action.READY, action.RUNNING, - action.SUCCEEDED, action.CANCELLED, action.FAILED] - for status in invalid: - action.status = status - result = action.signal(action.SIG_RESUME) - self.assertIsNone(result) - self.assertEqual(0, mock_call.call_count) - mock_call.reset_mock() - - @mock.patch.object(ao.Action, 'signal') - @mock.patch.object(dobj.Dependency, 'get_depended') - def test_signal_cancel(self, mock_dobj, mock_signal): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - action.load = mock.Mock() - action.set_status = mock.Mock() - mock_dobj.return_value = None - - action.status = action.RUNNING - action.signal_cancel() - - action.load.assert_not_called() - action.set_status.assert_not_called() - mock_dobj.assert_called_once_with(action.context, action.id) - mock_signal.assert_called_once_with(action.context, action.id, - action.SIG_CANCEL) - - @mock.patch.object(ao.Action, 'signal') - @mock.patch.object(dobj.Dependency, 'get_depended') - def test_signal_cancel_children(self, mock_dobj, mock_signal): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - child_status_mock = mock.Mock() - children = [] - for child_id in CHILD_IDS: - child = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=child_id) - child.status = child.READY - child.set_status = child_status_mock - children.append(child) - mock_dobj.return_value = CHILD_IDS - action.load = mock.Mock() - action.load.side_effect = children - - action.status = action.RUNNING - action.signal_cancel() - - mock_dobj.assert_called_once_with(action.context, action.id) - child_status_mock.assert_not_called() - self.assertEqual(3, mock_signal.call_count) - self.assertEqual(2, action.load.call_count) - - @mock.patch.object(ao.Action, 'signal') - @mock.patch.object(dobj.Dependency, 'get_depended') - def test_signal_cancel_children_lifecycle(self, mock_dobj, mock_signal): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - child_status_mock = mock.Mock() - children = [] - for child_id in CHILD_IDS: - child = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=child_id) - child.status = child.WAITING_LIFECYCLE_COMPLETION - child.set_status = child_status_mock - children.append(child) - mock_dobj.return_value = CHILD_IDS - action.load = mock.Mock() - action.load.side_effect = children - - action.status = action.RUNNING - action.signal_cancel() - - mock_dobj.assert_called_once_with(action.context, action.id) - self.assertEqual(2, child_status_mock.call_count) - self.assertEqual(3, mock_signal.call_count) - self.assertEqual(2, action.load.call_count) - - @mock.patch.object(ao.Action, 'signal') - @mock.patch.object(dobj.Dependency, 'get_depended') - def test_signal_cancel_lifecycle(self, mock_dobj, mock_signal): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - action.load = mock.Mock() - 
action.set_status = mock.Mock() - mock_dobj.return_value = None - - action.status = action.WAITING_LIFECYCLE_COMPLETION - action.signal_cancel() - - action.load.assert_not_called() - action.set_status.assert_called_once_with(action.RES_CANCEL, - 'Action execution cancelled') - mock_dobj.assert_called_once_with(action.context, action.id) - mock_signal.assert_called_once_with(action.context, action.id, - action.SIG_CANCEL) - - @mock.patch.object(ao.Action, 'signal') - @mock.patch.object(dobj.Dependency, 'get_depended') - def test_signal_cancel_immutable(self, mock_dobj, mock_signal): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - action.load = mock.Mock() - action.set_status = mock.Mock() - mock_dobj.return_value = None - - action.status = action.FAILED - self.assertRaises(exception.ActionImmutable, action.signal_cancel) - - action.load.assert_not_called() - action.set_status.assert_not_called() - mock_signal.assert_not_called() - - @mock.patch.object(dobj.Dependency, 'get_depended') - def test_force_cancel(self, mock_dobj): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - action.load = mock.Mock() - action.set_status = mock.Mock() - action.release_lock = mock.Mock() - mock_dobj.return_value = None - - action.status = action.RUNNING - action.force_cancel() - - action.load.assert_not_called() - action.set_status.assert_called_once_with( - action.RES_CANCEL, 'Action execution force cancelled') - self.assertEqual(1, action.release_lock.call_count) - - @mock.patch.object(dobj.Dependency, 'get_depended') - def test_force_cancel_children(self, mock_dobj): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID) - child_status_mock = mock.Mock() - child_release_mock = mock.Mock() - children = [] - for child_id in CHILD_IDS: - child = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=child_id) - child.status = child.WAITING_LIFECYCLE_COMPLETION - child.set_status = child_status_mock - child.release_lock = child_release_mock - children.append(child) - mock_dobj.return_value = CHILD_IDS - action.set_status = mock.Mock() - action.release_lock = mock.Mock() - action.load = mock.Mock() - action.load.side_effect = children - - action.status = action.RUNNING - action.force_cancel() - - mock_dobj.assert_called_once_with(action.context, action.id) - self.assertEqual(2, child_status_mock.call_count) - self.assertEqual(2, child_release_mock.call_count) - self.assertEqual(2, action.load.call_count) - self.assertEqual(1, action.release_lock.call_count) - - def test_execute_default(self): - action = ab.Action.__new__(DummyAction, OBJID, 'BOOM', self.ctx) - self.assertRaises(NotImplementedError, - action.execute) - - @mock.patch.object(EVENT, 'info') - @mock.patch.object(EVENT, 'error') - @mock.patch.object(EVENT, 'warning') - @mock.patch.object(ao.Action, 'mark_succeeded') - @mock.patch.object(ao.Action, 'mark_failed') - @mock.patch.object(ao.Action, 'mark_cancelled') - @mock.patch.object(ao.Action, 'mark_ready') - @mock.patch.object(ao.Action, 'abandon') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(eventlet, 'sleep') - def test_set_status(self, mock_sleep, mock_start, mock_abandon, - mark_ready, mark_cancel, mark_fail, - mark_succeed, mock_event, mock_error, - mock_info): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id='FAKE_ID') - action.entity = mock.Mock() - - action.set_status(action.RES_OK, 'FAKE_REASON') - - self.assertEqual(action.SUCCEEDED, action.status) - self.assertEqual('FAKE_REASON', action.status_reason) - 
mark_succeed.assert_called_once_with(action.context, 'FAKE_ID', - mock.ANY) - - action.set_status(action.RES_ERROR, 'FAKE_ERROR') - self.assertEqual(action.FAILED, action.status) - self.assertEqual('FAKE_ERROR', action.status_reason) - mark_fail.assert_called_once_with(action.context, 'FAKE_ID', mock.ANY, - 'FAKE_ERROR') - - mark_fail.reset_mock() - action.set_status(action.RES_TIMEOUT, 'TIMEOUT_ERROR') - self.assertEqual(action.FAILED, action.status) - self.assertEqual('TIMEOUT_ERROR', action.status_reason) - mark_fail.assert_called_once_with(action.context, 'FAKE_ID', mock.ANY, - 'TIMEOUT_ERROR') - - mark_fail.reset_mock() - action.set_status(action.RES_CANCEL, 'CANCELLED') - self.assertEqual(action.CANCELLED, action.status) - self.assertEqual('CANCELLED', action.status_reason) - mark_cancel.assert_called_once_with(action.context, 'FAKE_ID', - mock.ANY) - - mark_fail.reset_mock() - action.set_status(action.RES_RETRY, 'BUSY') - self.assertEqual(action.READY, action.status) - self.assertEqual('BUSY', action.status_reason) - mock_start.assert_called_once_with(action.id) - mock_sleep.assert_called_once_with(10) - mock_abandon.assert_called_once_with( - action.context, 'FAKE_ID', {'data': {'retries': 1}}) - - mark_fail.reset_mock() - action.data = {'retries': 3} - action.set_status(action.RES_RETRY, 'BUSY') - self.assertEqual(action.RES_ERROR, action.status) - mark_fail.assert_called_once_with(action.context, 'FAKE_ID', mock.ANY, - 'BUSY') - - @mock.patch.object(EVENT, 'info') - @mock.patch.object(EVENT, 'error') - @mock.patch.object(EVENT, 'warning') - @mock.patch.object(ao.Action, 'mark_succeeded') - @mock.patch.object(ao.Action, 'mark_failed') - @mock.patch.object(ao.Action, 'abandon') - def test_set_status_dump_event(self, mock_abandon, mark_fail, - mark_succeed, mock_warning, mock_error, - mock_info): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id='FAKE_ID') - action.entity = mock.Mock() - - action.set_status(action.RES_OK, 'FAKE_SUCCEEDED') - mock_info.assert_called_once_with(action, consts.PHASE_END, - 'FAKE_SUCCEEDED') - - action.set_status(action.RES_ERROR, 'FAKE_ERROR') - mock_error.assert_called_once_with(action, consts.PHASE_ERROR, - 'FAKE_ERROR') - - action.set_status(action.RES_RETRY, 'FAKE_RETRY') - mock_warning.assert_called_once_with(action, consts.PHASE_ERROR, - 'FAKE_RETRY') - - @mock.patch.object(EVENT, 'info') - @mock.patch.object(EVENT, 'error') - @mock.patch.object(EVENT, 'warning') - @mock.patch.object(ao.Action, 'mark_succeeded') - @mock.patch.object(ao.Action, 'mark_failed') - @mock.patch.object(ao.Action, 'abandon') - def test_set_status_reason_is_none(self, mock_abandon, mark_fail, - mark_succeed, mock_warning, mock_error, - mock_info): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id='FAKE_ID') - action.entity = mock.Mock() - - action.set_status(action.RES_OK) - mock_info.assert_called_once_with(action, consts.PHASE_END, - 'SUCCEEDED') - - action.set_status(action.RES_ERROR) - mock_error.assert_called_once_with(action, consts.PHASE_ERROR, - 'ERROR') - - action.set_status(action.RES_RETRY) - mock_warning.assert_called_once_with(action, consts.PHASE_ERROR, - 'RETRY') - - @mock.patch.object(ao.Action, 'check_status') - def test_get_status(self, mock_get): - mock_get.return_value = 'FAKE_STATUS' - - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - action.id = 'FAKE_ID' - - res = action.get_status() - - self.assertEqual('FAKE_STATUS', res) - self.assertEqual('FAKE_STATUS', action.status) - mock_get.assert_called_once_with(action.context, 
'FAKE_ID', mock.ANY) - - @mock.patch.object(ab, 'wallclock') - def test_is_timeout(self, mock_time): - action = ab.Action.__new__(DummyAction, 'OBJ', 'BOOM', self.ctx) - action.start_time = 1 - action.timeout = 10 - - mock_time.return_value = 9 - self.assertFalse(action.is_timeout()) - - mock_time.return_value = 10 - self.assertFalse(action.is_timeout()) - - mock_time.return_value = 11 - self.assertFalse(action.is_timeout()) - - mock_time.return_value = 12 - self.assertTrue(action.is_timeout()) - - @mock.patch.object(EVENT, 'debug') - def test_check_signal_timeout(self, mock_debug): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id='FAKE_ID', - timeout=10) - action.entity = mock.Mock() - self.patchobject(action, 'is_timeout', return_value=True) - - res = action._check_signal() - self.assertEqual(action.RES_TIMEOUT, res) - - @mock.patch.object(ao.Action, 'signal_query') - def test_check_signal_signals_caught(self, mock_query): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - action.id = 'FAKE_ID' - action.timeout = 100 - self.patchobject(action, 'is_timeout', return_value=False) - sig_cmd = mock.Mock() - mock_query.return_value = sig_cmd - - res = action._check_signal() - self.assertEqual(sig_cmd, res) - mock_query.assert_called_once_with(action.context, 'FAKE_ID') - - @mock.patch.object(ao.Action, 'signal_query') - def test_is_cancelled(self, mock_query): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - action.id = 'FAKE_ID' - action.timeout = 100 - self.patchobject(action, 'is_timeout', return_value=False) - - mock_query.return_value = action.SIG_CANCEL - res = action.is_cancelled() - self.assertTrue(res) - mock_query.assert_called_once_with(action.context, 'FAKE_ID') - mock_query.reset_mock() - - mock_query.return_value = None - res = action.is_cancelled() - self.assertFalse(res) - mock_query.assert_called_once_with(action.context, 'FAKE_ID') - - @mock.patch.object(ao.Action, 'signal_query') - def test_is_suspended(self, mock_query): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - action.id = 'FAKE_ID' - action.timeout = 100 - self.patchobject(action, 'is_timeout', return_value=False) - - mock_query.return_value = action.SIG_SUSPEND - res = action.is_suspended() - self.assertTrue(res) - mock_query.assert_called_once_with(action.context, 'FAKE_ID') - mock_query.reset_mock() - - mock_query.return_value = 'OTHERS' - res = action.is_suspended() - self.assertFalse(res) - mock_query.assert_called_once_with(action.context, 'FAKE_ID') - - @mock.patch.object(ao.Action, 'signal_query') - def test_is_resumed(self, mock_query): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - action.id = 'FAKE_ID' - action.timeout = 100 - self.patchobject(action, 'is_timeout', return_value=False) - - mock_query.return_value = action.SIG_RESUME - res = action.is_resumed() - self.assertTrue(res) - mock_query.assert_called_once_with(action.context, 'FAKE_ID') - mock_query.reset_mock() - - mock_query.return_value = 'OTHERS' - res = action.is_resumed() - self.assertFalse(res) - mock_query.assert_called_once_with(action.context, 'FAKE_ID') - - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - def test_policy_check_target_invalid(self, mock_load): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - - res = action.policy_check('FAKE_CLUSTER', 'WHEN') - - self.assertIsNone(res) - self.assertEqual(0, mock_load.call_count) - - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - def test_policy_check_no_bindings(self, mock_load): - action = ab.Action(OBJID, 'OBJECT_ACTION', 
self.ctx) - mock_load.return_value = [] - - res = action.policy_check('FAKE_CLUSTER', 'BEFORE') - - self.assertIsNone(res) - self.assertEqual(policy_mod.CHECK_OK, action.data['status']) - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER', - sort='priority', - filters={'enabled': True}) - - @mock.patch.object(dobj.Dependency, 'get_depended') - @mock.patch.object(dobj.Dependency, 'get_dependents') - def test_action_to_dict(self, mock_dep_by, mock_dep_on): - mock_dep_on.return_value = ['ACTION_1'] - mock_dep_by.return_value = ['ACTION_2'] - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, - **self.action_values) - action.id = 'FAKE_ID' - ts = common_utils.isotime(self.action_values['created_at']) - expected = { - 'id': 'FAKE_ID', - 'name': 'FAKE_NAME', - 'cluster_id': 'FAKE_CLUSTER_ID', - 'action': 'OBJECT_ACTION', - 'target': OBJID, - 'cause': 'FAKE_CAUSE', - 'owner': OWNER_ID, - 'interval': 60, - 'start_time': 0, - 'end_time': 0, - 'timeout': 120, - 'status': 'FAKE_STATUS', - 'status_reason': 'FAKE_STATUS_REASON', - 'inputs': {'param': 'value'}, - 'outputs': {'key': 'output_value'}, - 'depends_on': ['ACTION_1'], - 'depended_by': ['ACTION_2'], - 'created_at': ts, - 'updated_at': None, - 'data': {'data_key': 'data_value'}, - 'user': USER_ID, - 'project': PROJECT_ID, - } - - res = action.to_dict() - self.assertEqual(expected, res) - mock_dep_on.assert_called_once_with(action.context, 'FAKE_ID') - mock_dep_by.assert_called_once_with(action.context, 'FAKE_ID') - - -class ActionPolicyCheckTest(base.SenlinTestCase): - - def setUp(self): - super(ActionPolicyCheckTest, self).setUp() - - self.ctx = utils.dummy_context() - environment.global_env().register_policy('DummyPolicy', - fakes.TestPolicy) - - def _create_policy(self): - values = { - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - } - policy = fakes.TestPolicy('DummyPolicy', 'test-policy', **values) - policy.store(self.ctx) - return policy - - def _create_cp_binding(self, cluster_id, policy_id): - return cpo.ClusterPolicy(cluster_id=cluster_id, policy_id=policy_id, - enabled=True, id=uuidutils.generate_uuid(), - last_op=None) - - @mock.patch.object(policy_mod.Policy, 'post_op') - @mock.patch.object(policy_mod.Policy, 'pre_op') - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - @mock.patch.object(policy_mod.Policy, 'load') - def test_policy_check_missing_target(self, mock_load, mock_load_all, - mock_pre_op, mock_post_op): - cluster_id = CLUSTER_ID - # Note: policy is mocked - spec = { - 'type': 'TestPolicy', - 'version': '1.0', - 'properties': {'KEY2': 5}, - } - policy = fakes.TestPolicy('test-policy', spec) - policy.id = uuidutils.generate_uuid() - policy.TARGET = [('BEFORE', 'OBJECT_ACTION')] - # Note: policy binding is created but not stored - pb = self._create_cp_binding(cluster_id, policy.id) - self.assertIsNone(pb.last_op) - mock_load_all.return_value = [pb] - mock_load.return_value = policy - mock_pre_op.return_value = None - mock_post_op.return_value = None - action = ab.Action(cluster_id, 'OBJECT_ACTION_1', self.ctx) - - res = action.policy_check(cluster_id, 'AFTER') - - self.assertIsNone(res) - self.assertEqual(policy_mod.CHECK_OK, action.data['status']) - mock_load_all.assert_called_once_with( - action.context, cluster_id, sort='priority', - filters={'enabled': True}) - mock_load.assert_called_once_with(action.context, policy.id, - project_safe=False) - # last_op was updated anyway - self.assertEqual(action.inputs['last_op'], pb.last_op) - # neither pre_op nor post_op was called, because target not 
match - self.assertEqual(0, mock_pre_op.call_count) - self.assertEqual(0, mock_post_op.call_count) - - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - @mock.patch.object(policy_mod.Policy, 'load') - def test_policy_check_pre_op(self, mock_load, mock_load_all): - cluster_id = CLUSTER_ID - # Note: policy is mocked - spec = { - 'type': 'TestPolicy', - 'version': '1.0', - 'properties': {'KEY2': 5}, - } - policy = fakes.TestPolicy('test-policy', spec) - policy.id = uuidutils.generate_uuid() - policy.TARGET = [('BEFORE', 'OBJECT_ACTION')] - # Note: policy binding is created but not stored - pb = self._create_cp_binding(cluster_id, policy.id) - self.assertIsNone(pb.last_op) - mock_load_all.return_value = [pb] - mock_load.return_value = policy - entity = mock.Mock() - action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx) - action.entity = entity - - res = action.policy_check(cluster_id, 'BEFORE') - - self.assertIsNone(res) - self.assertEqual(policy_mod.CHECK_OK, action.data['status']) - mock_load_all.assert_called_once_with( - action.context, cluster_id, sort='priority', - filters={'enabled': True}) - mock_load.assert_called_once_with(action.context, policy.id, - project_safe=False) - # last_op was not updated - self.assertIsNone(pb.last_op) - - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - @mock.patch.object(policy_mod.Policy, 'load') - def test_policy_check_post_op(self, mock_load, mock_load_all): - cluster_id = CLUSTER_ID - # Note: policy is mocked - policy = mock.Mock(id=uuidutils.generate_uuid(), cooldown=0, - TARGET=[('AFTER', 'OBJECT_ACTION')]) - # Note: policy binding is created but not stored - pb = self._create_cp_binding(cluster_id, policy.id) - self.assertIsNone(pb.last_op) - mock_load_all.return_value = [pb] - mock_load.return_value = policy - entity = mock.Mock() - action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx) - action.entity = entity - - res = action.policy_check(CLUSTER_ID, 'AFTER') - - self.assertIsNone(res) - self.assertEqual(policy_mod.CHECK_OK, action.data['status']) - mock_load_all.assert_called_once_with( - action.context, cluster_id, sort='priority', - filters={'enabled': True}) - mock_load.assert_called_once_with(action.context, policy.id, - project_safe=False) - # last_op was updated for POST check - self.assertEqual(action.inputs['last_op'], pb.last_op) - # post_op was called, but pre_op was not - self.assertEqual(0, policy.pre_op.call_count) - policy.post_op.assert_called_once_with(cluster_id, action) - - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - @mock.patch.object(policy_mod.Policy, 'load') - def test_policy_check_abort_in_middle(self, mock_load, - mock_load_all): - cluster_id = CLUSTER_ID - # Note: both policies are mocked - policy1 = mock.Mock(id=uuidutils.generate_uuid(), cooldown=0, - TARGET=[('AFTER', 'OBJECT_ACTION')]) - policy1.name = 'P1' - policy2 = mock.Mock(id=uuidutils.generate_uuid(), cooldown=0, - TARGET=[('AFTER', 'OBJECT_ACTION')]) - policy2.name = 'P2' - action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx) - action.data = mock.MagicMock() - - # mock action.data to return an error for the first policy call - # (i.e. after policy1 post_op method has been called). - # this should stop the policy check and prevent the - # policy2 post_op method from being called.
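#         How the MagicMock subscript trick on the next line works -- a
#         generic illustration of unittest.mock behaviour, not
#         Senlin-specific code:
#
#             from unittest import mock
#             data = mock.MagicMock()
#             data.__getitem__.side_effect = ['CHECK_ERROR', '']
#             data['status']  # first lookup  -> 'CHECK_ERROR' (aborts loop)
#             data['status']  # second lookup -> '' (never reached here)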
- action.data.__getitem__.side_effect = [policy_mod.CHECK_ERROR, ''] - - # Note: policy binding is created but not stored - pb1 = self._create_cp_binding(cluster_id, policy1.id) - pb2 = self._create_cp_binding(cluster_id, policy2.id) - mock_load_all.return_value = [pb1, pb2] - # mock return value for two calls - mock_load.side_effect = [policy1, policy2] - - res = action.policy_check(cluster_id, 'AFTER') - - self.assertIsNone(res) - - # post_op from policy1 was called, but post_op from policy2 was not - policy1.post_op.assert_called_once_with(cluster_id, action) - self.assertEqual(0, policy2.post_op.call_count) - - mock_load_all.assert_called_once_with( - action.context, cluster_id, sort='priority', - filters={'enabled': True}) - calls = [mock.call(action.context, policy1.id, project_safe=False)] - mock_load.assert_has_calls(calls) - - -class ActionProcTest(base.SenlinTestCase): - - def setUp(self): - super(ActionProcTest, self).setUp() - - self.ctx = utils.dummy_context() - - @mock.patch.object(EVENT, 'info') - @mock.patch.object(ab.Action, 'load') - @mock.patch.object(ao.Action, 'mark_succeeded') - def test_action_proc_successful(self, mock_mark, mock_load, - mock_event_info): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - action.is_cancelled = mock.Mock() - action.is_cancelled.return_value = False - mock_obj = mock.Mock() - action.entity = mock_obj - self.patchobject(action, 'execute', - return_value=(action.RES_OK, 'BIG SUCCESS')) - mock_status = self.patchobject(action, 'set_status') - mock_load.return_value = action - - res = ab.ActionProc(self.ctx, 'ACTION_ID') - - self.assertTrue(res) - mock_load.assert_called_once_with(self.ctx, action_id='ACTION_ID', - project_safe=False) - mock_event_info.assert_called_once_with(action, 'start', 'ACTION_I') - mock_status.assert_called_once_with(action.RES_OK, 'BIG SUCCESS') - - @mock.patch.object(EVENT, 'info') - @mock.patch.object(ab.Action, 'load') - @mock.patch.object(ao.Action, 'mark_failed') - def test_action_proc_failed_error(self, mock_mark, mock_load, mock_info): - action = ab.Action(OBJID, 'CLUSTER_ACTION', self.ctx, id=ACTION_ID) - action.is_cancelled = mock.Mock() - action.is_cancelled.return_value = False - action.entity = mock.Mock(id=CLUSTER_ID, name='fake-cluster') - - self.patchobject(action, 'execute', side_effect=Exception('Boom!')) - mock_status = self.patchobject(action, 'set_status') - mock_load.return_value = action - - res = ab.ActionProc(self.ctx, 'ACTION') - - self.assertFalse(res) - mock_load.assert_called_once_with(self.ctx, action_id='ACTION', - project_safe=False) - mock_info.assert_called_once_with(action, 'start', 'ACTION') - mock_status.assert_called_once_with(action.RES_ERROR, 'Boom!') - - @mock.patch.object(EVENT, 'info') - @mock.patch.object(ab.Action, 'load') - @mock.patch.object(ao.Action, 'mark_failed') - def test_action_proc_is_cancelled(self, mock_mark, mock_load, mock_info): - action = ab.Action(OBJID, 'CLUSTER_ACTION', self.ctx, id=ACTION_ID) - action.is_cancelled = mock.Mock() - action.is_cancelled.return_value = True - action.entity = mock.Mock(id=CLUSTER_ID, name='fake-cluster') - - mock_status = self.patchobject(action, 'set_status') - mock_load.return_value = action - - res = ab.ActionProc(self.ctx, 'ACTION') - self.assertIs(True, res) - - mock_load.assert_called_once_with(self.ctx, action_id='ACTION', - project_safe=False) - - mock_info.assert_not_called() - mock_status.assert_called_once_with( - action.RES_CANCEL, - 'CLUSTER_ACTION [%s] cancelled' % ACTION_ID[:8]) diff --git 
a/senlin/tests/unit/engine/actions/test_add_nodes.py b/senlin/tests/unit/engine/actions/test_add_nodes.py deleted file mode 100644 index 394a24f95..000000000 --- a/senlin/tests/unit/engine/actions/test_add_nodes.py +++ /dev/null @@ -1,296 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.engine.actions import base as ab -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.engine import dispatcher -from senlin.engine import node as nm -from senlin.objects import action as ao -from senlin.objects import dependency as dobj -from senlin.objects import node as no -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterAddNodesTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterAddNodesTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - @mock.patch.object(nm.Node, 'load') - def test_do_add_nodes_single(self, mock_load_node, mock_wait, mock_start, - mock_update, mock_dep, mock_action, - mock_count, mock_get, mock_load): - cluster = mock.Mock(id='CLUSTER_ID', min_size=1, max_size=5) - mock_load.return_value = cluster - mock_count.return_value = 2 - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - id='CLUSTER_ACTION_ID', - inputs={'nodes': ['NODE_1']}, - data={}, outputs={}) - db_node = mock.Mock(id='NODE_1', cluster_id='', ACTIVE='ACTIVE', - status='ACTIVE') - mock_get.return_value = db_node - mock_action.return_value = 'NODE_ACTION_ID' - mock_wait.return_value = (action.RES_OK, 'Good to go!') - - # do it - res_code, res_msg = action.do_add_nodes() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Completed adding nodes.', res_msg) - - mock_load.assert_called_once_with(action.context, 'CLUSTER_ID') - mock_get.assert_called_once_with(action.context, 'NODE_1') - mock_count.assert_called_once_with(action.context, 'CLUSTER_ID') - mock_action.assert_called_once_with( - action.context, 'NODE_1', 'NODE_JOIN', - name='node_join_NODE_1', - cluster_id='CLUSTER_ID', cause='Derived Action', - inputs={'cluster_id': 'CLUSTER_ID'}) - mock_dep.assert_called_once_with(action.context, ['NODE_ACTION_ID'], - 'CLUSTER_ACTION_ID') - mock_update.assert_called_once_with( - action.context, 'NODE_ACTION_ID', {'status': 'READY'}) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_ADD_NODES, desired_capacity=3) - self.assertEqual({'nodes_added': ['NODE_1']}, action.outputs) - self.assertEqual({'creation': 
{'nodes': ['NODE_1']}}, - action.data) - mock_load_node.assert_called_once_with(action.context, db_node=db_node) - cluster.add_node.assert_called_once_with(mock_load_node.return_value) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - @mock.patch.object(nm.Node, 'load') - def test_do_add_nodes_multi(self, mock_load_node, mock_wait, mock_start, - mock_update, mock_dep, mock_action, - mock_count, mock_get, mock_load): - - cluster = mock.Mock(id='CLUSTER_ID', min_size=1, max_size=5) - mock_load.return_value = cluster - mock_count.return_value = 2 - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - id='CLUSTER_ACTION_ID', - inputs={'nodes': ['NODE_1', 'NODE_2']}, - outputs={}, data={}) - - node1 = mock.Mock(id='NODE_1', cluster_id='', ACTIVE='ACTIVE', - status='ACTIVE') - node2 = mock.Mock(id='NODE_2', cluster_id='', ACTIVE='ACTIVE', - status='ACTIVE') - mock_get.side_effect = [node1, node2] - node_obj_1 = mock.Mock() - node_obj_2 = mock.Mock() - mock_load_node.side_effect = [node_obj_1, node_obj_2] - mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2'] - mock_wait.return_value = (action.RES_OK, 'Good to go!') - - # do it - res_code, res_msg = action.do_add_nodes() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Completed adding nodes.', res_msg) - - mock_load.assert_called_once_with(action.context, 'CLUSTER_ID') - mock_get.assert_has_calls([ - mock.call(action.context, 'NODE_1'), - mock.call(action.context, 'NODE_2')]) - mock_count.assert_called_once_with(action.context, 'CLUSTER_ID') - mock_action.assert_has_calls([ - mock.call(action.context, 'NODE_1', 'NODE_JOIN', - name='node_join_NODE_1', cause='Derived Action', - cluster_id='CLUSTER_ID', - inputs={'cluster_id': 'CLUSTER_ID'}), - mock.call(action.context, 'NODE_2', 'NODE_JOIN', - name='node_join_NODE_2', cause='Derived Action', - cluster_id='CLUSTER_ID', - inputs={'cluster_id': 'CLUSTER_ID'})]) - - mock_dep.assert_called_once_with( - action.context, - ['NODE_ACTION_1', 'NODE_ACTION_2'], - 'CLUSTER_ACTION_ID') - mock_update.assert_has_calls([ - mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}), - mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'}) - ]) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_ADD_NODES, desired_capacity=4) - self.assertEqual({'nodes_added': ['NODE_1', 'NODE_2']}, action.outputs) - self.assertEqual({'creation': {'nodes': ['NODE_1', 'NODE_2']}}, - action.data) - mock_load_node.assert_has_calls([ - mock.call(action.context, db_node=node1), - mock.call(action.context, db_node=node2) - ]) - cluster.add_node.assert_has_calls([ - mock.call(node_obj_1), mock.call(node_obj_2)]) - - @mock.patch.object(no.Node, 'get') - def test_do_add_nodes_node_not_found(self, mock_get, mock_load): - action = ca.ClusterAction('ID', 'CLUSTER_ACTION', self.ctx, - inputs={'nodes': ['NODE_1']}) - mock_get.return_value = None - - # do it - res_code, res_msg = action.do_add_nodes() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("Node NODE_1 is not found.", res_msg) - - @mock.patch.object(no.Node, 'get') - def 
test_do_add_nodes_node_already_member(self, mock_get, mock_load): - cluster = mock.Mock(id='FAKE_CLUSTER') - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - inputs={'nodes': ['NODE_1']}, data={}) - mock_get.return_value = mock.Mock(cluster_id='FAKE_CLUSTER') - - # do it - res_code, res_msg = action.do_add_nodes() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("Node NODE_1 is already owned by cluster " - "FAKE_CLUSTER.", res_msg) - self.assertEqual({}, action.data) - - @mock.patch.object(no.Node, 'get') - def test_do_add_nodes_node_in_other_cluster(self, mock_get, mock_load): - cluster = mock.Mock(id='FAKE_CLUSTER') - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - inputs={'nodes': ['NODE_1']}, data={}) - mock_get.return_value = mock.Mock(cluster_id='ANOTHER_CLUSTER') - - # do it - res_code, res_msg = action.do_add_nodes() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("Node NODE_1 is already owned by cluster " - "ANOTHER_CLUSTER.", res_msg) - - @mock.patch.object(no.Node, 'get') - def test_do_add_nodes_node_not_active(self, mock_get, mock_load): - action = ca.ClusterAction('ID', 'CLUSTER_ACTION', self.ctx, - inputs={'nodes': ['NODE_1']}, data={}) - mock_get.return_value = mock.Mock(cluster_id='', status='ERROR') - - # do it - res_code, res_msg = action.do_add_nodes() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("Node NODE_1 is not in ACTIVE status.", res_msg) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_add_nodes_failed_check(self, mock_count, mock_get, - mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=2) - mock_load.return_value = cluster - node1 = mock.Mock(id='nid1', cluster_id='', ACTIVE='ACTIVE', - status='ACTIVE') - node2 = mock.Mock(id='nid2', cluster_id='', ACTIVE='ACTIVE', - status='ACTIVE') - mock_get.side_effect = [node1, node2] - inputs = {'nodes': [node1.id, node2.id]} - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs=inputs) - mock_count.return_value = 1 - - # execute - res_code, res_msg = action.do_add_nodes() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("The target capacity (3) is greater than the " - "cluster's max_size (2).", res_msg) - self.assertEqual(2, mock_get.call_count) - mock_count.assert_called_once_with(action.context, 'CID') - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - @mock.patch.object(nm.Node, 'load') - def test_do_add_nodes_failed_waiting(self, mock_load_node, mock_wait, - mock_start, mock_update, mock_dep, - mock_action, mock_count, mock_get, - mock_load): - cluster = mock.Mock(id='CLUSTER_ID', min_size=1, max_size=5) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - id='CLUSTER_ACTION_ID', data={}, - inputs={'nodes': ['NODE_1']}) - mock_get.return_value = mock.Mock(id='NODE_1', cluster_id='', - status='ACTIVE', ACTIVE='ACTIVE') - mock_count.return_value = 3 - mock_action.return_value = 'NODE_ACTION_ID' - mock_wait.return_value = (action.RES_TIMEOUT, 
'Timeout!') - - # do it - res_code, res_msg = action.do_add_nodes() - - # assertions - mock_load.assert_called_once_with(action.context, 'CLUSTER_ID') - mock_get.assert_called_once_with(action.context, 'NODE_1') - mock_count.assert_called_once_with(action.context, 'CLUSTER_ID') - mock_action.assert_called_once_with( - action.context, 'NODE_1', 'NODE_JOIN', - name='node_join_NODE_1', cluster_id='CLUSTER_ID', - cause='Derived Action', - inputs={'cluster_id': 'CLUSTER_ID'}) - mock_dep.assert_called_once_with(action.context, ['NODE_ACTION_ID'], - 'CLUSTER_ACTION_ID') - mock_update.assert_called_once_with( - action.context, 'NODE_ACTION_ID', {'status': 'READY'}) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - self.assertEqual(0, cluster.eval_status.call_count) - self.assertEqual({}, action.outputs) - self.assertEqual({}, action.data) - self.assertEqual(0, mock_load_node.call_count) - self.assertEqual(0, cluster.add_node.call_count) diff --git a/senlin/tests/unit/engine/actions/test_attach_policy.py b/senlin/tests/unit/engine/actions/test_attach_policy.py deleted file mode 100644 index d8bae7a31..000000000 --- a/senlin/tests/unit/engine/actions/test_attach_policy.py +++ /dev/null @@ -1,109 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
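The capacity guard exercised by test_do_add_nodes_failed_check above reduces to simple bounds arithmetic. A minimal sketch of that validation (the helper name and signature are illustrative assumptions; the real check lives in Senlin's scaling utilities, which are not part of this diff):

    def check_size_params(current, count, max_size):
        # Reject an add-nodes request whose target capacity exceeds
        # max_size; a negative max_size conventionally means "unbounded"
        # (assumption for this sketch).
        target = current + count
        if 0 <= max_size < target:
            return ("The target capacity (%d) is greater than the "
                    "cluster's max_size (%d)." % (target, max_size))
        return None

    # With the fixture above, check_size_params(1, 2, 2) yields exactly the
    # error message the test asserts.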
- -from unittest import mock - -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterAttachPolicyTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterAttachPolicyTest, self).setUp() - self.ctx = utils.dummy_context() - - def test_do_attach_policy(self, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - cluster.policies = [] - cluster.attach_policy.return_value = True, 'OK' - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = { - 'policy_id': 'FAKE_POLICY', - 'FOO': 'BAR' - } - - # do it - res_code, res_msg = action.do_attach_policy() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('OK', res_msg) - cluster.attach_policy.assert_called_once_with( - action.context, 'FAKE_POLICY', {'FOO': 'BAR'}) - cluster.store.assert_called_once_with(action.context) - - def test_do_attach_policy_missing_policy(self, mock_load): - cluster = mock.Mock() - cluster.id = 'CLID' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {} - - # do it - res_code, res_msg = action.do_attach_policy() - # assertion - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Policy not specified.', res_msg) - - def test_do_detach_policy(self, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - cluster.detach_policy.return_value = True, 'Success' - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'policy_id': 'FAKE_POLICY'} - - # do it - res_code, res_msg = action.do_detach_policy() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Success', res_msg) - cluster.detach_policy.assert_called_once_with(action.context, - 'FAKE_POLICY') - cluster.store.assert_called_once_with(action.context) - - def test_do_detach_policy_missing_policy(self, mock_load): - cluster = mock.Mock() - cluster.id = 'CID' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {} - - # do it - res_code, res_msg = action.do_detach_policy() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Policy not specified.', res_msg) - - def test_do_detach_policy_failed(self, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - cluster.detach_policy.return_value = False, 'Failure.' - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'policy_id': 'FAKE_POLICY'} - - # do it - res_code, res_msg = action.do_detach_policy() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Failure.', res_msg) - cluster.detach_policy.assert_called_once_with(action.context, - 'FAKE_POLICY') diff --git a/senlin/tests/unit/engine/actions/test_check.py b/senlin/tests/unit/engine/actions/test_check.py deleted file mode 100644 index fa94adecc..000000000 --- a/senlin/tests/unit/engine/actions/test_check.py +++ /dev/null @@ -1,202 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.engine.actions import base as ab -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.engine import dispatcher -from senlin.objects import action as ao -from senlin.objects import dependency as dobj -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterCheckTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterCheckTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_do_check(self, mock_wait, mock_start, mock_dep, mock_action, - mock_update, mock_load): - node1 = mock.Mock(id='NODE_1') - node2 = mock.Mock(id='NODE_2') - cluster = mock.Mock(id='FAKE_ID', status='old status', - status_reason='old reason') - cluster.nodes = [node1, node2] - cluster.do_check.return_value = True - mock_load.return_value = cluster - mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2'] - - action = ca.ClusterAction('FAKE_CLUSTER', 'CLUSTER_CHECK', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - - mock_wait.return_value = (action.RES_OK, 'Everything is Okay') - - # do it - res_code, res_msg = action.do_check() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster checking completed.', res_msg) - - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER') - cluster.do_check.assert_called_once_with(action.context) - mock_action.assert_has_calls([ - mock.call(action.context, 'NODE_1', 'NODE_CHECK', - name='node_check_NODE_1', - cause=consts.CAUSE_DERIVED, - inputs={}), - mock.call(action.context, 'NODE_2', 'NODE_CHECK', - name='node_check_NODE_2', - cause=consts.CAUSE_DERIVED, - inputs={}) - ]) - mock_dep.assert_called_once_with(action.context, - ['NODE_ACTION_1', 'NODE_ACTION_2'], - 'CLUSTER_ACTION_ID') - mock_update.assert_has_calls([ - mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}), - mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'}), - ]) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_CHECK) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(ao.Action, 'delete_by_target') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_do_check_need_delete(self, mock_wait, mock_start, mock_dep, - mock_delete, mock_action, mock_update, - mock_load): - node1 = mock.Mock(id='NODE_1') - node2 = mock.Mock(id='NODE_2') - cluster = mock.Mock(id='FAKE_ID', status='old status', - status_reason='old reason') - cluster.nodes = [node1, node2] - cluster.do_check.return_value = True - 
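#         The mocks above fake a two-node cluster; the assertions below
#         encode the engine's derived-action fan-out, roughly (a sketch of
#         the pattern under test, not the engine source):
#
#             child_ids = [
#                 ab.Action.create(ctx, node.id, 'NODE_CHECK',
#                                  name='node_check_%s' % node.id,
#                                  cause=consts.CAUSE_DERIVED, inputs={})
#                 for node in cluster.nodes
#             ]
#             dobj.Dependency.create(ctx, child_ids, action.id)
#             for cid in child_ids:
#                 ao.Action.update(ctx, cid, {'status': 'READY'})
#             dispatcher.start_action()
#             res, reason = action._wait_for_dependents()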
mock_load.return_value = cluster - mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2'] - action = ca.ClusterAction('FAKE_CLUSTER', 'CLUSTER_CHECK', self.ctx, - inputs={'delete_check_action': True}) - action.id = 'CLUSTER_ACTION_ID' - - mock_wait.return_value = (action.RES_OK, 'Everything is Okay') - - # do it - res_code, res_msg = action.do_check() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster checking completed.', res_msg) - - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER') - cluster.do_check.assert_called_once_with(action.context) - mock_delete.assert_has_calls([ - mock.call(action.context, 'NODE_1', action=['NODE_CHECK'], - status=['SUCCEEDED', 'FAILED']), - mock.call(action.context, 'NODE_2', action=['NODE_CHECK'], - status=['SUCCEEDED', 'FAILED']) - ]) - mock_action.assert_has_calls([ - mock.call(action.context, 'NODE_1', 'NODE_CHECK', - name='node_check_NODE_1', - cause=consts.CAUSE_DERIVED, - inputs={'delete_check_action': True}), - mock.call(action.context, 'NODE_2', 'NODE_CHECK', - name='node_check_NODE_2', - cause=consts.CAUSE_DERIVED, - inputs={'delete_check_action': True}) - ]) - mock_dep.assert_called_once_with(action.context, - ['NODE_ACTION_1', 'NODE_ACTION_2'], - 'CLUSTER_ACTION_ID') - mock_update.assert_has_calls([ - mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}), - mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'}), - ]) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_CHECK) - - def test_do_check_cluster_empty(self, mock_load): - cluster = mock.Mock(id='FAKE_ID', nodes=[], status='old status', - status_reason='old reason') - cluster.do_check.return_value = True - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_CHECK', self.ctx) - - # do it - res_code, res_msg = action.do_check() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster checking completed.', res_msg) - cluster.do_check.assert_called_once_with(self.ctx) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_CHECK) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_do_check_failed_waiting(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CLUSTER_ID', status='old status', - status_reason='old reason') - cluster.do_check.return_value = True - cluster.nodes = [node] - mock_load.return_value = cluster - mock_action.return_value = 'NODE_ACTION_ID' - - action = ca.ClusterAction('FAKE_CLUSTER', 'CLUSTER_CHECK', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - - mock_wait.return_value = (action.RES_TIMEOUT, 'Timeout!') - - res_code, res_msg = action.do_check() - - self.assertEqual(action.RES_TIMEOUT, res_code) - self.assertEqual('Timeout!', res_msg) - - mock_load.assert_called_once_with(self.ctx, 'FAKE_CLUSTER') - cluster.do_check.assert_called_once_with(action.context) - mock_action.assert_called_once_with( - action.context, 'NODE_1', 'NODE_CHECK', - name='node_check_NODE_1', - inputs={}, - cause=consts.CAUSE_DERIVED, - ) - mock_dep.assert_called_once_with(action.context, ['NODE_ACTION_ID'], - 'CLUSTER_ACTION_ID') - 
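#         Failure-path contract verified here: the (RES_TIMEOUT, 'Timeout!')
#         tuple from _wait_for_dependents() is returned by do_check()
#         unchanged, yet eval_status() still runs (asserted just below) so
#         the cluster record reflects whatever the node checks reported
#         before the timeout. Roughly (a sketch, not the engine source):
#
#             res, reason = self._wait_for_dependents()
#             cluster.eval_status(action.context, consts.CLUSTER_CHECK)
#             return res, reason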
mock_update.assert_called_once_with(action.context, 'NODE_ACTION_ID', - {'status': 'READY'}) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_CHECK) diff --git a/senlin/tests/unit/engine/actions/test_cluster_action.py b/senlin/tests/unit/engine/actions/test_cluster_action.py deleted file mode 100644 index 348ca16d8..000000000 --- a/senlin/tests/unit/engine/actions/test_cluster_action.py +++ /dev/null @@ -1,276 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.engine.actions import base as ab -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.engine import dispatcher -from senlin.engine import senlin_lock -from senlin.objects import action as ao -from senlin.policies import base as pb -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -CLUSTER_ID = 'e1cfd82b-dc95-46ad-86e8-37864d7be1cd' -OBJID = '571fffb8-f41c-4cbc-945c-cb2937d76f19' -ACTION_ID = '4c2cead2-fd74-418a-9d12-bd2d9bd7a812' - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterActionTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterActionTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(ab.Action, 'policy_check') - def test_execute(self, mock_check, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_FLY', self.ctx) - action.do_fly = mock.Mock(return_value=(action.RES_OK, 'Good!')) - action.data = { - 'status': pb.CHECK_OK, - 'reason': 'Policy checking passed' - } - - res_code, res_msg = action._execute() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Good!', res_msg) - mock_check.assert_has_calls([ - mock.call('FAKE_CLUSTER', 'BEFORE'), - mock.call('FAKE_CLUSTER', 'AFTER')]) - - @mock.patch.object(ab.Action, 'policy_check') - def test_execute_failed_action(self, mock_check, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_FLY', self.ctx) - action.do_fly = mock.Mock(return_value=(action.RES_ERROR, 'Good!')) - action.data = { - 'status': pb.CHECK_OK, - 'reason': 'Policy checking passed' - } - - res_code, res_msg = action._execute() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Good!', res_msg) - mock_check.assert_has_calls([ - mock.call('FAKE_CLUSTER', 'BEFORE'), - mock.call('FAKE_CLUSTER', 'AFTER')]) - - @mock.patch.object(ab.Action, 'policy_check') - def test_execute_failed_policy_check(self, mock_check, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_FLY', self.ctx) - action.do_fly = mock.Mock(return_value=(action.RES_OK, 
'Good!')) - action.data = { - 'status': pb.CHECK_ERROR, - 'reason': 'Something is wrong.' - } - - res_code, res_msg = action._execute() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Policy check failure: Something is wrong.', res_msg) - mock_check.assert_called_once_with('FAKE_CLUSTER', 'BEFORE') - - @mock.patch.object(ab.Action, 'policy_check') - def test_execute_unsupported_action(self, mock_check, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_DANCE', self.ctx) - action.data = { - 'status': pb.CHECK_OK, - 'reason': 'All is going well.' - } - - res_code, res_msg = action._execute() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Unsupported action: CLUSTER_DANCE.', res_msg) - mock_check.assert_called_once_with('FAKE_CLUSTER', 'BEFORE') - - def test_execute_post_check_failed(self, mock_load): - def fake_check(cluster_id, target): - if target == 'BEFORE': - action.data = { - 'status': pb.CHECK_OK, - 'reason': 'Policy checking passed.' - } - else: - action.data = { - 'status': pb.CHECK_ERROR, - 'reason': 'Policy checking failed.' - } - - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_FLY', self.ctx) - action.do_fly = mock.Mock(return_value=(action.RES_OK, 'Cool!')) - mock_check = self.patchobject(action, 'policy_check', - side_effect=fake_check) - - res_code, res_msg = action._execute() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Policy check failure: Policy checking failed.', - res_msg) - mock_check.assert_has_calls([ - mock.call('FAKE_CLUSTER', 'BEFORE'), - mock.call('FAKE_CLUSTER', 'AFTER')]) - - @mock.patch.object(senlin_lock, 'cluster_lock_acquire') - @mock.patch.object(senlin_lock, 'cluster_lock_release') - def test_execute_with_locking(self, mock_release, mock_acquire, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_FLY', self.ctx) - action.id = 'ACTION_ID' - self.patchobject(action, '_execute', - return_value=(action.RES_OK, 'success')) - mock_acquire.return_value = action - - res_code, res_msg = action.execute() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('success', res_msg) - mock_load.assert_has_calls( - [mock.call(action.context, 'FAKE_CLUSTER'), - mock.call(action.context, 'FAKE_CLUSTER')]) - mock_acquire.assert_called_once_with( - self.ctx, 'FAKE_CLUSTER', 'ACTION_ID', None, - senlin_lock.CLUSTER_SCOPE, False) - mock_release.assert_called_once_with( - 'FAKE_CLUSTER', 'ACTION_ID', senlin_lock.CLUSTER_SCOPE) - - @mock.patch.object(senlin_lock, 'cluster_lock_acquire') - def test_execute_failed_locking(self, mock_acquire, mock_load): - cluster = mock.Mock() - cluster.id = 'CLUSTER_ID' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - mock_acquire.return_value = None - - res_code, res_msg = action.execute() - - self.assertEqual(action.RES_RETRY, res_code) - self.assertEqual('Failed in locking cluster.', res_msg) - mock_load.assert_called_once_with(action.context, cluster.id) - - @mock.patch.object(senlin_lock, 'cluster_lock_acquire') - @mock.patch.object(senlin_lock, 'cluster_lock_release') - def test_execute_failed_execute(self, mock_release, mock_acquire, - mock_load): - cluster = mock.Mock() - cluster.id = 'CLUSTER_ID' - mock_load.return_value = 
cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'ACTION_ID' - mock_acquire.return_value = action - self.patchobject(action, '_execute', - return_value=(action.RES_ERROR, 'Failed execution.')) - - res_code, res_msg = action.execute() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Failed execution.', res_msg) - mock_load.assert_has_calls( - [mock.call(action.context, cluster.id), - mock.call(action.context, cluster.id)]) - mock_acquire.assert_called_once_with( - self.ctx, 'CLUSTER_ID', 'ACTION_ID', None, - senlin_lock.CLUSTER_SCOPE, True) - mock_release.assert_called_once_with( - 'CLUSTER_ID', 'ACTION_ID', senlin_lock.CLUSTER_SCOPE) - - def test_cancel(self, mock_load): - action = ca.ClusterAction('ID', 'CLUSTER_DELETE', self.ctx) - res = action.cancel() - self.assertEqual(action.RES_OK, res) - - -class CompleteLifecycleProcTest(base.SenlinTestCase): - - def setUp(self): - super(CompleteLifecycleProcTest, self).setUp() - - self.ctx = utils.dummy_context() - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ab.Action, 'load') - @mock.patch.object(ao.Action, 'update') - def test_complete_lifecycle_proc_successful(self, mock_update, mock_load, - mock_dispatcher_start): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - mock_obj = mock.Mock() - action.entity = mock_obj - mock_get_status = self.patchobject(action, 'get_status') - mock_get_status.return_value = \ - consts.ACTION_WAITING_LIFECYCLE_COMPLETION - mock_load.return_value = action - - res = ca.CompleteLifecycleProc(self.ctx, 'ACTION_ID') - - self.assertTrue(res) - mock_load.assert_called_once_with(self.ctx, action_id='ACTION_ID', - project_safe=False) - mock_get_status.assert_called_once_with() - mock_update.assert_called_once_with( - self.ctx, 'ACTION_ID', - {'status': consts.ACTION_READY, - 'status_reason': 'Lifecycle complete.', - 'owner': None} - ) - mock_dispatcher_start.assert_called_once_with() - - @mock.patch.object(ab.Action, 'load') - def test_complete_lifecycle_proc_failed_action_not_found(self, mock_load): - mock_load.return_value = None - - self.assertRaises(exc.ResourceNotFound, - ca.CompleteLifecycleProc, - self.ctx, 'ACTION') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ab.Action, 'load') - @mock.patch.object(ao.Action, 'update') - def test_complete_lifecycle_proc_warning(self, mock_update, mock_load, - mock_dispatcher_start): - action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx) - mock_obj = mock.Mock() - action.entity = mock_obj - mock_get_status = self.patchobject(action, 'get_status') - mock_get_status.return_value = consts.ACTION_SUCCEEDED - mock_load.return_value = action - - res = ca.CompleteLifecycleProc(self.ctx, 'ACTION_ID') - - self.assertFalse(res) - mock_load.assert_called_once_with(self.ctx, action_id='ACTION_ID', - project_safe=False) - mock_get_status.assert_called_once_with() - mock_update.assert_not_called() - mock_dispatcher_start.assert_not_called() diff --git a/senlin/tests/unit/engine/actions/test_create.py b/senlin/tests/unit/engine/actions/test_create.py deleted file mode 100644 index ee1161018..000000000 --- a/senlin/tests/unit/engine/actions/test_create.py +++ /dev/null @@ -1,288 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.engine.actions import base as ab -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.engine import dispatcher -from senlin.engine import node as nm -from senlin.objects import action as ao -from senlin.objects import cluster as co -from senlin.objects import dependency as dobj -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterCreateTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterCreateTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(co.Cluster, 'get_next_index') - @mock.patch.object(nm, 'Node') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_create_nodes_single(self, mock_wait, mock_start, mock_dep, - mock_node, mock_index, mock_action, - mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', profile_id='FAKE_PROFILE', - user='FAKE_USER', project='FAKE_PROJECT', - domain='FAKE_DOMAIN', - config={"node.name.format": "node-$3I"}) - mock_index.return_value = 123 - node = mock.Mock(id='NODE_ID') - mock_node.return_value = node - - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - - # node_action is faked - mock_action.return_value = 'NODE_ACTION_ID' - - # do it - res_code, res_msg = action._create_nodes(1) - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - mock_index.assert_called_once_with(action.context, 'CLUSTER_ID') - mock_node.assert_called_once_with('node-123', - 'FAKE_PROFILE', - 'CLUSTER_ID', - context=action.context, - user='FAKE_USER', - project='FAKE_PROJECT', - domain='FAKE_DOMAIN', - index=123, metadata={}) - node.store.assert_called_once_with(action.context) - mock_action.assert_called_once_with(action.context, 'NODE_ID', - 'NODE_CREATE', - name='node_create_NODE_ID', - cluster_id='CLUSTER_ID', - cause='Derived Action') - mock_dep.assert_called_once_with(action.context, ['NODE_ACTION_ID'], - 'CLUSTER_ACTION_ID') - mock_update.assert_called_with( - action.context, 'NODE_ACTION_ID', - {'status': ab.Action.READY}) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - self.assertEqual({'nodes_added': ['NODE_ID']}, action.outputs) - - @mock.patch.object(co.Cluster, 'get') - def test_create_nodes_zero(self, mock_get, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - mock_get.return_value = mock.Mock() - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - - res_code, res_msg = action._create_nodes(0) - - self.assertEqual(action.RES_OK, 
res_code) - self.assertEqual('', res_msg) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(co.Cluster, 'get_next_index') - @mock.patch.object(nm, 'Node') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_create_nodes_multiple(self, mock_wait, mock_start, mock_dep, - mock_node, mock_index, mock_action, - mock_update, mock_load): - cluster = mock.Mock(id='01234567-123434', - config={"node.name.format": "node-$3I"}) - node1 = mock.Mock(id='01234567-abcdef', - data={'placement': {'region': 'regionOne'}}) - node2 = mock.Mock(id='abcdefab-123456', - data={'placement': {'region': 'regionTwo'}}) - mock_node.side_effect = [node1, node2] - mock_index.side_effect = [123, 124] - - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'placement': { - 'count': 2, - 'placements': [ - {'region': 'regionOne'}, - {'region': 'regionTwo'} - ] - } - } - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - - # node_action is faked - mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2'] - - # do it - res_code, res_msg = action._create_nodes(2) - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - self.assertEqual(2, mock_index.call_count) - self.assertEqual(2, mock_node.call_count) - node1.store.assert_called_once_with(action.context) - node2.store.assert_called_once_with(action.context) - self.assertEqual(2, mock_action.call_count) - self.assertEqual(1, mock_dep.call_count) - - update_calls = [ - mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}), - mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'}) - ] - mock_update.assert_has_calls(update_calls) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - self.assertEqual({'nodes_added': [node1.id, node2.id]}, action.outputs) - self.assertEqual({'region': 'regionOne'}, node1.data['placement']) - self.assertEqual({'region': 'regionTwo'}, node2.data['placement']) - mock_node_calls = [ - mock.call('node-123', mock.ANY, '01234567-123434', - user=mock.ANY, project=mock.ANY, domain=mock.ANY, - index=123, context=mock.ANY, metadata={}, - data={'placement': {'region': 'regionOne'}}), - mock.call('node-124', mock.ANY, '01234567-123434', - user=mock.ANY, project=mock.ANY, domain=mock.ANY, - index=124, context=mock.ANY, metadata={}, - data={'placement': {'region': 'regionTwo'}}) - ] - - mock_node.assert_has_calls(mock_node_calls) - cluster.add_node.assert_has_calls([ - mock.call(node1), mock.call(node2)]) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(co.Cluster, 'get') - @mock.patch.object(nm, 'Node') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_create_nodes_multiple_failed_wait(self, mock_wait, mock_start, - mock_dep, mock_node, mock_get, - mock_update, mock_load): - cluster = mock.Mock(id='01234567-123434', config={}) - db_cluster = mock.Mock(next_index=1) - mock_get.return_value = db_cluster - node1 = mock.Mock(id='01234567-abcdef', data={}) - node2 = mock.Mock(id='abcdefab-123456', data={}) - mock_node.side_effect = [node1, node2] - - mock_load.return_value = cluster - # cluster action is 
real - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'placement': { - 'count': 2, - 'placements': [ - {'region': 'regionOne'}, - {'region': 'regionTwo'} - ] - } - } - mock_wait.return_value = (action.RES_ERROR, 'Waiting timed out') - - # node_action is faked - n_action_1 = mock.Mock() - n_action_2 = mock.Mock() - self.patchobject(ab, 'Action', side_effect=[n_action_1, n_action_2]) - - # do it - res_code, res_msg = action._create_nodes(2) - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Failed in creating nodes.', res_msg) - - def test_do_create_success(self, mock_load): - cluster = mock.Mock(id='FAKE_CLUSTER', ACTIVE='ACTIVE') - cluster.do_create.return_value = True - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - - x_create_nodes = self.patchobject(action, '_create_nodes', - return_value=(action.RES_OK, 'OK')) - # do it - res_code, res_msg = action.do_create() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster creation succeeded.', res_msg) - x_create_nodes.assert_called_once_with(cluster.desired_capacity) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_CREATE, created_at=mock.ANY) - - def test_do_create_failed_create_cluster(self, mock_load): - cluster = mock.Mock(id='FAKE_CLUSTER') - cluster.do_create.return_value = False - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - - # do it - res_code, res_msg = action.do_create() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Cluster creation failed.', res_msg) - cluster.set_status.assert_called_once_with( - action.context, 'ERROR', 'Cluster creation failed.') - - def test_do_create_failed_create_nodes(self, mock_load): - cluster = mock.Mock(id='FAKE_ID',) - cluster.do_create.return_value = True - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - - # do it - for code in [action.RES_CANCEL, action.RES_TIMEOUT, action.RES_ERROR]: - self.patchobject(action, '_create_nodes', - return_value=(code, 'Really Bad')) - - res_code, res_msg = action.do_create() - - self.assertEqual(code, res_code) - self.assertEqual('Really Bad', res_msg) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_CREATE) - cluster.eval_status.reset_mock() - - def test_do_create_failed_for_retry(self, mock_load): - cluster = mock.Mock(id='FAKE_ID', INIT='INIT') - cluster.do_create.return_value = True - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - self.patchobject(action, '_create_nodes', - return_value=(action.RES_RETRY, 'retry')) - - # do it - res_code, res_msg = action.do_create() - - self.assertEqual(action.RES_RETRY, res_code) - self.assertEqual('retry', res_msg) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_CREATE) diff --git a/senlin/tests/unit/engine/actions/test_custom_action.py b/senlin/tests/unit/engine/actions/test_custom_action.py deleted file mode 100644 index 8e0002e80..000000000 --- a/senlin/tests/unit/engine/actions/test_custom_action.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from senlin.engine.actions import custom_action as ca -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class CustomActionTest(base.SenlinTestCase): - - def setUp(self): - super(CustomActionTest, self).setUp() - self.ctx = utils.dummy_context() - - def test_init(self): - obj = ca.CustomAction('OBJID', 'OBJECT_ACTION', self.ctx) - self.assertIsNotNone(obj) - - def test_execute(self): - obj = ca.CustomAction('OBJID', 'OBJECT_ACTION', self.ctx) - - params = {'key': 'value'} - res = obj.execute(**params) - - self.assertEqual(obj.RES_OK, res[0]) - self.assertEqual('', res[1]) diff --git a/senlin/tests/unit/engine/actions/test_del_nodes.py b/senlin/tests/unit/engine/actions/test_del_nodes.py deleted file mode 100644 index d6b34c96f..000000000 --- a/senlin/tests/unit/engine/actions/test_del_nodes.py +++ /dev/null @@ -1,221 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
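These deleted test modules share one mocking idiom worth keeping in mind while reading them: a class-level @mock.patch.object(...) decorator applies to every test method in the TestCase and hands its mock in as the last positional argument (the trailing mock_load parameter throughout), while method-level patch decorators are applied bottom-up, so the decorator nearest the def supplies the first mock argument. A minimal, self-contained sketch of that ordering, using a hypothetical Engine class in place of the patched Senlin objects:

    import unittest
    from unittest import mock


    class Engine(object):
        """Hypothetical stand-in for the Senlin classes patched in these tests."""

        @staticmethod
        def load(ctx):
            raise RuntimeError('replaced by a mock in tests')

        @staticmethod
        def dispatch():
            raise RuntimeError('replaced by a mock in tests')


    # Class-level patch: wraps every test method, so its mock arrives as
    # the LAST positional argument of each test (cf. mock_load here).
    @mock.patch.object(Engine, 'load')
    class EngineTest(unittest.TestCase):

        # Method-level patches are applied bottom-up: the decorator
        # closest to the function supplies the FIRST mock argument.
        @mock.patch.object(Engine, 'dispatch')
        def test_load_then_dispatch(self, mock_dispatch, mock_load):
            mock_load.return_value = 'a cluster'

            self.assertEqual('a cluster', Engine.load(None))
            Engine.dispatch()

            mock_load.assert_called_once_with(None)
            mock_dispatch.assert_called_once_with()


    if __name__ == '__main__':
        unittest.main()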
- -from unittest import mock - -from senlin.common import consts -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.objects import node as no -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterDelNodesTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterDelNodesTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(ca.ClusterAction, '_sleep') - @mock.patch.object(no.Node, 'get') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_del_nodes(self, mock_count, mock_delete, mock_get, mock_sleep, - mock_load): - - cluster = mock.Mock(id='FAKE_CLUSTER', min_size=0, max_size=5) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - id='CLUSTER_ACTION_ID', data={}, - inputs={'candidates': ['NODE_1', 'NODE_2']}) - - node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_CLUSTER') - node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_CLUSTER') - mock_get.side_effect = [node1, node2] - mock_count.return_value = 2 - mock_delete.return_value = (action.RES_OK, 'Good to go!') - - # do it - res_code, res_msg = action.do_del_nodes() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Completed deleting nodes.', res_msg) - - # these are default settings - expected = { - 'deletion': { - 'destroy_after_deletion': False, - 'grace_period': 0, - 'reduce_desired_capacity': True, - } - } - self.assertEqual(expected, action.data) - - mock_get.assert_has_calls([ - mock.call(action.context, 'NODE_1'), - mock.call(action.context, 'NODE_2')]) - mock_count.assert_called_once_with(action.context, 'FAKE_CLUSTER') - mock_delete.assert_called_once_with(['NODE_1', 'NODE_2']) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_DEL_NODES, desired_capacity=0) - - @mock.patch.object(ca.ClusterAction, '_sleep') - @mock.patch.object(no.Node, 'get') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_del_nodes_with_deletion_policy(self, mock_count, mock_delete, - mock_get, mock_sleep, - mock_load): - cid = 'FAKE_CLUSTER' - cluster = mock.Mock(id=cid, min_size=0, max_size=5, - desired_capacity=4) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - id='CLUSTER_ACTION_ID', - inputs={'candidates': ['NODE_1', 'NODE_2']}) - action.data = { - 'deletion': { - 'count': 2, - # the 'candidates' value will be ignored - 'candidates': ['NODE_1', 'NODE_2'], - 'destroy_after_deletion': True, - 'grace_period': 2, - 'reduce_desired_capacity': False, - } - } - - node1 = mock.Mock(id='NODE_1', cluster_id=cid) - node2 = mock.Mock(id='NODE_2', cluster_id=cid) - mock_get.side_effect = [node1, node2] - mock_count.return_value = 4 - mock_delete.return_value = (action.RES_OK, 'Good to go!') - - # do it - res_code, res_msg = action.do_del_nodes() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Completed deleting nodes.', res_msg) - mock_get.assert_has_calls([ - mock.call(action.context, 'NODE_1'), - mock.call(action.context, 'NODE_2')]) - mock_count.assert_called_once_with(action.context, 'FAKE_CLUSTER') - mock_delete.assert_called_once_with(['NODE_1', 'NODE_2']) - self.assertTrue(action.data['deletion']['destroy_after_deletion']) - 
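# The 'grace_period': 2 injected through action.data above is what the
# next assertion verifies: the delete flow pauses via the patched _sleep
# helper for the policy-supplied number of seconds while handling the
# deletion.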
mock_sleep.assert_called_once_with(2) - # Note: desired_capacity not decreased due to policy enforcement - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_DEL_NODES) - - @mock.patch.object(no.Node, 'get') - def test_do_del_nodes_node_not_found(self, mock_get, mock_load): - cluster = mock.Mock() - mock_load.return_value = cluster - action = ca.ClusterAction('ID', 'CLUSTER_ACTION', self.ctx, - inputs={'candidates': ['NODE_1', 'NODE_2']}) - mock_get.return_value = None - - # do it - res_code, res_msg = action.do_del_nodes() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("Nodes not found: ['NODE_1', 'NODE_2'].", res_msg) - expected = { - 'deletion': { - 'destroy_after_deletion': False, - 'grace_period': 0, - 'reduce_desired_capacity': True, - } - } - self.assertEqual(expected, action.data) - - @mock.patch.object(no.Node, 'get') - def test_do_del_nodes_node_not_member(self, mock_get, mock_load): - cluster = mock.Mock(id='FAKE_CLUSTER') - mock_load.return_value = cluster - action = ca.ClusterAction('ID', 'CLUSTER_ACTION', self.ctx, - inputs={'candidates': ['NODE_1', 'NODE_2']}) - node1 = mock.Mock(cluster_id='') - node2 = mock.Mock(cluster_id='ANOTHER_CLUSTER') - mock_get.side_effect = [node1, node2] - - # do it - res_code, res_msg = action.do_del_nodes() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual("Completed deleting nodes.", res_msg) - expected = { - 'deletion': { - 'destroy_after_deletion': False, - 'grace_period': 0, - 'reduce_desired_capacity': True, - } - } - self.assertEqual(expected, action.data) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_del_nodes_failed_delete(self, mock_count, mock_delete, - mock_get, mock_load): - - cluster = mock.Mock(id='FAKE_CLUSTER', min_size=0, max_size=5) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - inputs={'candidates': ['NODE_1']}, data={}) - node1 = mock.Mock(cluster_id='FAKE_CLUSTER') - mock_get.side_effect = [node1] - mock_count.return_value = 3 - mock_delete.return_value = (action.RES_ERROR, 'Things went bad.') - - # do it - res_code, res_msg = action.do_del_nodes() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("Things went bad.", res_msg) - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER') - mock_get.assert_called_once_with(action.context, 'NODE_1') - mock_count.assert_called_once_with(action.context, 'FAKE_CLUSTER') - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_DEL_NODES, desired_capacity=2) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_del_nodes_failed_check(self, mock_count, mock_get, - mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=2) - mock_load.return_value = cluster - node1 = mock.Mock(id='nid1', cluster_id='CID', ACTIVE='ACTIVE', - status='ACTIVE') - mock_get.side_effect = [node1] - inputs = {'candidates': [node1.id]} - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs=inputs) - mock_count.return_value = 1 - - # execute - res_code, res_msg = action.do_del_nodes() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("The target capacity (0) is less than the " - "cluster's min_size (1).", res_msg) - mock_count.assert_called_once_with(action.context, 
'CID') - mock_get.assert_called_once_with(action.context, 'nid1') diff --git a/senlin/tests/unit/engine/actions/test_delete.py b/senlin/tests/unit/engine/actions/test_delete.py deleted file mode 100644 index e2ae80c0f..000000000 --- a/senlin/tests/unit/engine/actions/test_delete.py +++ /dev/null @@ -1,1096 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.engine.actions import base as ab -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.engine import dispatcher -from senlin.engine.notifications import message as msg -from senlin.objects import action as ao -from senlin.objects import cluster_policy as cpo -from senlin.objects import dependency as dobj -from senlin.objects import node as no -from senlin.objects import receiver as ro -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterDeleteTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterDeleteTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_single(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='FAKE_CLUSTER', desired_capacity=100, config={}) - - # cluster action is real - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.inputs = {'destroy_after_deletion': False} - action.context = self.ctx - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.return_value = 'NODE_ACTION_ID' - - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - mock_action.assert_called_once_with( - action.context, 'NODE_ID', 'NODE_DELETE', - name='node_delete_NODE_ID', cause='Derived Action', - cluster_id='FAKE_CLUSTER', inputs={}) - mock_dep.assert_called_once_with(action.context, ['NODE_ACTION_ID'], - 'CLUSTER_ACTION_ID') - mock_update.assert_called_with(action.context, - 'NODE_ACTION_ID', - {'status': 'READY'}) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - self.assertEqual(['NODE_ID'], action.outputs['nodes_removed']) - cluster.remove_node.assert_called_once_with('NODE_ID') - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_single_stop_node(self, mock_wait, mock_start, - mock_dep, 
mock_action, mock_update, - mock_load): - # prepare mocks - cluster = mock.Mock(id='FAKE_CLUSTER', desired_capacity=100, - config={'cluster.stop_node_before_delete': True}) - - # cluster action is real - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.inputs = {'destroy_after_deletion': False} - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.return_value = 'NODE_ACTION_ID' - - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - create_actions = [ - mock.call(action.context, 'NODE_ID', 'NODE_OPERATION', - name='node_delete_NODE_ID', - cause='Derived Action', - cluster_id='FAKE_CLUSTER', - inputs={'operation': 'stop', - 'update_parent_status': False}), - mock.call(action.context, 'NODE_ID', 'NODE_DELETE', - name='node_delete_NODE_ID', - cluster_id='FAKE_CLUSTER', - cause='Derived Action', inputs={}) - ] - mock_action.assert_has_calls(create_actions) - dep_calls = [ - mock.call(action.context, ['NODE_ACTION_ID'], 'CLUSTER_ACTION_ID'), - mock.call(action.context, ['NODE_ACTION_ID'], 'CLUSTER_ACTION_ID'), - ] - mock_dep.assert_has_calls(dep_calls) - update_calls = [ - mock.call(action.context, 'NODE_ACTION_ID', {'status': 'READY'}), - mock.call(action.context, 'NODE_ACTION_ID', {'status': 'READY'}) - ] - mock_update.assert_has_calls(update_calls) - mock_start.assert_has_calls([mock.call(), mock.call()]) - mock_wait.assert_has_calls([mock.call(), mock.call()]) - self.assertEqual(['NODE_ID'], action.outputs['nodes_removed']) - cluster.remove_node.assert_called_once_with('NODE_ID') - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_multi(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) - mock_load.return_value = cluster - - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.inputs = {'destroy_after_deletion': False} - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2'] - - # do it - res_code, res_msg = action._delete_nodes(['NODE_1', 'NODE_2']) - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - self.assertEqual(2, mock_action.call_count) - update_calls = [ - mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}), - mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'}) - ] - mock_update.assert_has_calls(update_calls) - self.assertEqual(1, mock_dep.call_count) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - self.assertEqual({'nodes_removed': ['NODE_1', 'NODE_2']}, - action.outputs) - cluster.remove_node.assert_has_calls([ - mock.call('NODE_1'), mock.call('NODE_2')]) - - def test_delete_empty(self, mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.inputs = {'destroy_after_deletion': False} - - # do it - 
res_code, res_msg = action._delete_nodes([]) - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('', res_msg) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_with_pd(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.inputs = {'destroy_after_deletion': False} - action.data = { - 'deletion': { - 'destroy_after_deletion': False - } - } - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.return_value = 'NODE_ACTION_ID' - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - mock_action.assert_called_once_with( - action.context, 'NODE_ID', 'NODE_LEAVE', - name='node_delete_NODE_ID', cluster_id='CLUSTER_ID', - cause='Derived Action', inputs={}) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(no.Node, 'get') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_with_lifecycle_hook(self, mock_wait, mock_start, - mock_post, mock_dep, - mock_node_get, - mock_action, mock_update, - mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'hooks': { - 'timeout': 10, - 'type': 'zaqar', - 'params': { - 'queue': 'myqueue' - } - } - } - action.owner = 'OWNER_ID' - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.return_value = 'NODE_ACTION_ID' - mock_node_get.return_value = mock.Mock( - status=consts.NS_ACTIVE, id='NODE_ID', physical_id="nova-server") - - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - self.assertEqual(1, mock_dep.call_count) - mock_action.assert_called_once_with( - action.context, 'NODE_ID', 'NODE_DELETE', - name='node_delete_NODE_ID', - cause='Derived Action with Lifecycle Hook', - cluster_id='CLUSTER_ID', - inputs={}) - update_calls = [ - mock.call(action.context, 'NODE_ACTION_ID', - {'status': 'WAITING_LIFECYCLE_COMPLETION', - 'owner': 'OWNER_ID'}), - ] - mock_update.assert_has_calls(update_calls) - mock_post.assert_called_once_with('NODE_ACTION_ID', 'NODE_ID', - 'nova-server', - consts.LIFECYCLE_NODE_TERMINATION) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with(action.data['hooks']['timeout']) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(no.Node, 'get') - @mock.patch.object(dobj.Dependency, 'create') - 
@mock.patch.object(msg.Message, 'post_lifecycle_hook_message') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_with_lifecycle_hook_failed_node( - self, mock_wait, mock_start, mock_post, mock_dep, mock_node_get, - mock_action, mock_update, mock_load): - self.delete_nodes_with_lifecycle_hook_invalid_node( - mock.Mock(status=consts.NS_ERROR), mock_wait, mock_start, - mock_post, mock_dep, mock_node_get, mock_action, mock_update, - mock_load) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(no.Node, 'get') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_with_lifecycle_hook_missing_node( - self, mock_wait, mock_start, mock_post, mock_dep, mock_node_get, - mock_action, mock_update, mock_load): - self.delete_nodes_with_lifecycle_hook_invalid_node( - None, mock_wait, mock_start, mock_post, mock_dep, mock_node_get, - mock_action, mock_update, mock_load) - - def delete_nodes_with_lifecycle_hook_invalid_node( - self, mock_node_obj, mock_wait, mock_start, mock_post, mock_dep, - mock_node_get, mock_action, mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'hooks': { - 'timeout': 10, - 'type': 'zaqar', - 'params': { - 'queue': 'myqueue' - } - } - } - action.owner = None - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.return_value = 'NODE_ACTION_ID' - mock_node_get.return_value = mock_node_obj - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - self.assertEqual(1, mock_dep.call_count) - mock_action.assert_called_once_with( - action.context, 'NODE_ID', 'NODE_DELETE', - name='node_delete_NODE_ID', - cluster_id='CLUSTER_ID', - cause='Derived Action with Lifecycle Hook', inputs={}) - update_calls = [ - mock.call(action.context, 'NODE_ACTION_ID', - {'status': 'READY', - 'owner': None}), - ] - mock_update.assert_has_calls(update_calls) - mock_post.assert_not_called() - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with(action.data['hooks']['timeout']) - - @mock.patch.object(ao.Action, 'check_status') - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(no.Node, 'get') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_with_lifecycle_hook_timeout(self, mock_wait, - mock_start, - mock_post, mock_dep, - mock_node_get, - mock_action, - mock_update, - mock_check_status, - mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'hooks': { - 
'timeout': 10, - 'type': 'zaqar', - 'params': { - 'queue': 'myqueue' - } - } - } - action.owner = 'OWNER_ID' - mock_wait.side_effect = [ - (action.RES_LIFECYCLE_HOOK_TIMEOUT, 'Timeout'), - (action.RES_OK, 'All dependents completed') - ] - mock_action.return_value = 'NODE_ACTION_ID' - mock_node_get.return_value = mock.Mock( - status=consts.NS_ACTIVE, id='NODE_ID', physical_id="nova-server") - mock_check_status.return_value = 'WAITING_LIFECYCLE_COMPLETION' - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - self.assertEqual(1, mock_dep.call_count) - mock_action.assert_called_once_with( - action.context, 'NODE_ID', 'NODE_DELETE', - name='node_delete_NODE_ID', - cluster_id='CLUSTER_ID', - cause='Derived Action with Lifecycle Hook', inputs={}) - update_calls = [ - mock.call(action.context, 'NODE_ACTION_ID', - {'status': 'WAITING_LIFECYCLE_COMPLETION', - 'owner': 'OWNER_ID'}), - mock.call(action.context, 'NODE_ACTION_ID', - {'status': 'READY', - 'owner': None}), - ] - mock_update.assert_has_calls(update_calls) - mock_post.assert_called_once_with('NODE_ACTION_ID', 'NODE_ID', - 'nova-server', - consts.LIFECYCLE_NODE_TERMINATION) - mock_start.assert_has_calls([mock.call(), mock.call()]) - wait_calls = [ - mock.call(action.data['hooks']['timeout']), - mock.call() - ] - mock_wait.assert_has_calls(wait_calls) - - @mock.patch.object(ab.Action, 'create') - def test_delete_nodes_with_lifecycle_hook_invalid_type(self, - mock_action, - mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'hooks': { - 'timeout': 10, - 'type': 'unknown_type', - 'params': { - 'queue': 'myqueue' - } - } - } - mock_action.return_value = 'NODE_ACTION_ID' - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("Failed in deleting nodes: Lifecycle hook type " - "'unknown_type' is not implemented", res_msg) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - def test_delete_nodes_with_lifecycle_hook_unsupported_webhook(self, - mock_action, - mock_update, - mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'hooks': { - 'timeout': 10, - 'type': 'webhook', - 'params': { - 'queue': 'myqueue' - } - } - } - mock_action.return_value = 'NODE_ACTION_ID' - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("Failed in deleting nodes: Lifecycle hook type " - "'webhook' is not implemented", res_msg) - - @mock.patch.object(ca.ClusterAction, '_remove_nodes_normally') - def test_delete_nodes_failed_remove_stop_node(self, mock_remove, - mock_load): - # prepare mocks - cluster = mock.Mock(id='ID', - config={'cluster.stop_node_before_delete': True}) - mock_load.return_value = cluster - # cluster action is real - action = 
ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.inputs = {'destroy_after_deletion': False} - action.data = {} - mock_remove.side_effect = [(action.RES_TIMEOUT, 'Timeout!'), - (action.RES_OK, 'OK')] - - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_OK, res_code) - self.assertEqual({}, action.data) - remove_calls = [ - mock.call('NODE_OPERATION', ['NODE_ID'], - {'operation': 'stop', 'update_parent_status': False}), - mock.call('NODE_DELETE', ['NODE_ID']), - ] - mock_remove.assert_has_calls(remove_calls) - - @mock.patch.object(ca.ClusterAction, '_remove_nodes_with_hook') - @mock.patch.object(ca.ClusterAction, '_remove_nodes_normally') - def test_delete_nodes_with_lifecycle_hook_failed_remove_stop_node( - self, mock_remove_normally, mock_remove_hook, mock_load): - # prepare mocks - cluster = mock.Mock(id='ID', - config={'cluster.stop_node_before_delete': True}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.inputs = {'destroy_after_deletion': False} - lifecycle_hook = { - 'timeout': 10, - 'type': 'zaqar', - 'params': { - 'queue': 'myqueue' - } - } - action.data = { - 'hooks': lifecycle_hook, - } - mock_remove_hook.return_value = (action.RES_TIMEOUT, 'Timeout!') - mock_remove_normally.return_value = (action.RES_OK, '') - - # do it - res_code, res_msg = action._delete_nodes(['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_OK, res_code) - mock_remove_hook.assert_called_once_with( - 'NODE_OPERATION', ['NODE_ID'], lifecycle_hook, - {'operation': 'stop', 'update_parent_status': False}) - mock_remove_normally.assert_called_once_with('NODE_DELETE', - ['NODE_ID']) - - def test_do_delete_success(self, mock_load): - node1 = mock.Mock(id='NODE_1') - node2 = mock.Mock(id='NODE_2') - cluster = mock.Mock(id='FAKE_CLUSTER', nodes=[node1, node2], - DELETING='DELETING') - cluster.do_delete.return_value = True - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - - mock_delete = self.patchobject(action, '_delete_nodes', - return_value=(action.RES_OK, 'Good')) - - # do it - res_code, res_msg = action.do_delete() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Good', res_msg) - self.assertEqual({'deletion': {'destroy_after_deletion': True}}, - action.data) - cluster.set_status.assert_called_once_with(action.context, 'DELETING', - 'Deletion in progress.') - mock_delete.assert_called_once_with(['NODE_1', 'NODE_2']) - cluster.do_delete.assert_called_once_with(action.context) - - @mock.patch.object(ro.Receiver, 'get_all') - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - def test_do_delete_with_policies(self, mock_policies, - mock_receivers, mock_load): - mock_policy1 = mock.Mock() - mock_policy1.policy_id = 'POLICY_ID1' - mock_policy2 = mock.Mock() - mock_policy2.policy_id = 'POLICY_ID2' - - mock_policies.return_value = [mock_policy1, mock_policy2] - mock_receivers.return_value = [] - - node1 = mock.Mock(id='NODE_1') - node2 = mock.Mock(id='NODE_2') - cluster = mock.Mock(id='FAKE_CLUSTER', nodes=[node1, node2], - DELETING='DELETING') - cluster.do_delete.return_value = True - mock_load.return_value = cluster - - cluster.detach_policy = mock.Mock() - cluster.detach_policy.return_value = (True, 'OK') - - action = 
ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - - mock_delete = self.patchobject(action, '_delete_nodes', - return_value=(action.RES_OK, 'Good')) - - # do it - res_code, res_msg = action.do_delete() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Good', res_msg) - self.assertEqual({'deletion': {'destroy_after_deletion': True}}, - action.data) - cluster.set_status.assert_called_once_with(action.context, 'DELETING', - 'Deletion in progress.') - mock_delete.assert_called_once_with(['NODE_1', 'NODE_2']) - cluster.do_delete.assert_called_once_with(action.context) - detach_calls = [mock.call(action.context, 'POLICY_ID1'), - mock.call(action.context, 'POLICY_ID2')] - cluster.detach_policy.assert_has_calls(detach_calls) - - @mock.patch.object(ro.Receiver, 'delete') - @mock.patch.object(ro.Receiver, 'get_all') - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - def test_do_delete_with_receivers(self, mock_policies, - mock_receivers, mock_rec_delete, - mock_load): - mock_receiver1 = mock.Mock() - mock_receiver1.id = 'RECEIVER_ID1' - mock_receiver2 = mock.Mock() - mock_receiver2.id = 'RECEIVER_ID2' - - mock_policies.return_value = [] - mock_receivers.return_value = [mock_receiver1, mock_receiver2] - - node1 = mock.Mock(id='NODE_1') - node2 = mock.Mock(id='NODE_2') - cluster = mock.Mock(id='FAKE_CLUSTER', nodes=[node1, node2], - DELETING='DELETING') - cluster.do_delete.return_value = True - mock_load.return_value = cluster - - cluster.detach_policy = mock.Mock() - cluster.detach_policy.return_value = (True, 'OK') - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - - mock_delete = self.patchobject(action, '_delete_nodes', - return_value=(action.RES_OK, 'Good')) - - # do it - res_code, res_msg = action.do_delete() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Good', res_msg) - self.assertEqual({'deletion': {'destroy_after_deletion': True}}, - action.data) - cluster.set_status.assert_called_once_with(action.context, 'DELETING', - 'Deletion in progress.') - mock_delete.assert_called_once_with(['NODE_1', 'NODE_2']) - cluster.do_delete.assert_called_once_with(action.context) - - cluster.detach_policy.assert_not_called() - rec_delete_calls = [mock.call(action.context, 'RECEIVER_ID1'), - mock.call(action.context, 'RECEIVER_ID2')] - mock_rec_delete.assert_has_calls(rec_delete_calls) - - def test_do_delete_failed_delete_nodes_timeout(self, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], ACTIVE='ACTIVE', - DELETING='DELETING', WARNING='WARNING') - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - self.patchobject(action, '_delete_nodes', - return_value=(action.RES_TIMEOUT, 'Timeout!')) - - res_code, res_msg = action.do_delete() - - self.assertEqual(action.RES_TIMEOUT, res_code) - self.assertEqual('Timeout!', res_msg) - cluster.set_status.assert_called_once_with( - action.context, 'DELETING', 'Deletion in progress.') - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_DELETE) - - def test_do_delete_failed_delete_nodes_with_error(self, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], ACTIVE='ACTIVE', - DELETING='DELETING', WARNING='WARNING') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - self.patchobject(action, '_delete_nodes', - 
return_value=(action.RES_ERROR, 'Error!')) - - res_code, res_msg = action.do_delete() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Error!', res_msg) - cluster.set_status.assert_called_once_with( - action.context, 'DELETING', 'Deletion in progress.') - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_DELETE) - - def test_do_delete_failed_delete_nodes_with_cancel(self, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], ACTIVE='ACTIVE', - DELETING='DELETING', WARNING='WARNING') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - self.patchobject(action, '_delete_nodes', - return_value=(action.RES_CANCEL, 'Cancelled!')) - - res_code, res_msg = action.do_delete() - - self.assertEqual(action.RES_CANCEL, res_code) - self.assertEqual('Cancelled!', res_msg) - cluster.set_status.assert_called_once_with( - action.context, 'DELETING', 'Deletion in progress.') - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_DELETE) - - def test_do_delete_failed_delete_nodes_with_retry(self, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], ACTIVE='ACTIVE', - DELETING='DELETING', WARNING='WARNING') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - self.patchobject(action, '_delete_nodes', - return_value=(action.RES_RETRY, 'Busy!')) - - res_code, res_msg = action.do_delete() - - self.assertEqual(action.RES_RETRY, res_code) - self.assertEqual('Busy!', res_msg) - cluster.set_status.assert_called_once_with( - action.context, 'DELETING', 'Deletion in progress.') - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_DELETE) - - def test_do_delete_failed_delete_cluster(self, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], DELETING='DELETING') - cluster.do_delete.return_value = False - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - - self.patchobject(action, '_delete_nodes', - return_value=(action.RES_OK, 'Good')) - # do it - res_code, res_msg = action.do_delete() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Cannot delete cluster object.', res_msg) - cluster.set_status.assert_called_once_with( - action.context, 'DELETING', 'Deletion in progress.') - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_DELETE) - - @mock.patch.object(ao.Action, 'check_status') - def test_wait_for_dependents(self, mock_check_status, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], DELETING='DELETING') - cluster.do_delete.return_value = False - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.data = {} - mock_check_status.return_value = 'READY' - - # do it - res_code, res_msg = action._wait_for_dependents() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents ended with success', res_msg) - - @mock.patch.object(ao.Action, 'check_status') - @mock.patch.object(ab.Action, 'is_cancelled') - def test_wait_for_dependents_cancelled(self, mock_cancelled, - mock_check_status, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], DELETING='DELETING') - cluster.do_delete.return_value = False - 
mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'ID1' - action.data = {} - mock_check_status.return_value = 'RUNNING' - mock_cancelled.return_value = True - - # do it - res_code, res_msg = action._wait_for_dependents() - - self.assertEqual(action.RES_CANCEL, res_code) - self.assertEqual('CLUSTER_DELETE [ID1] cancelled', res_msg) - - @mock.patch.object(ao.Action, 'check_status') - @mock.patch.object(ab.Action, 'is_cancelled') - @mock.patch.object(ab.Action, 'is_timeout') - def test_wait_for_dependents_timeout(self, mock_timeout, mock_cancelled, - mock_check_status, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], DELETING='DELETING') - cluster.do_delete.return_value = False - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'ID1' - action.data = {} - - mock_check_status.return_value = 'RUNNING' - mock_cancelled.return_value = False - mock_timeout.return_value = True - - # do it - res_code, res_msg = action._wait_for_dependents() - - self.assertEqual(action.RES_TIMEOUT, res_code) - self.assertEqual('CLUSTER_DELETE [ID1] timeout', res_msg) - - @mock.patch.object(ao.Action, 'check_status') - @mock.patch.object(ab.Action, 'is_cancelled') - @mock.patch.object(ab.Action, 'is_timeout') - def test_wait_for_dependents_lifecycle_timeout(self, - mock_timeout, - mock_cancelled, - mock_check_status, - mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], DELETING='DELETING') - cluster.do_delete.return_value = False - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'ID1' - action.data = {} - - mock_check_status.return_value = 'RUNNING' - mock_cancelled.return_value = False - mock_timeout.side_effect = [False, True] - - # do it - res_code, res_msg = action._wait_for_dependents(0) - - self.assertEqual(action.RES_LIFECYCLE_HOOK_TIMEOUT, res_code) - self.assertEqual('CLUSTER_DELETE [ID1] lifecycle hook timeout', - res_msg) - - @mock.patch('senlin.engine.actions.base.wallclock', mock.MagicMock( - return_value=10)) - def test_is_timeout(self, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], DELETING='DELETING') - cluster.do_delete.return_value = False - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.start_time = 0 - action.timeout = 5 - - # do it - res = action.is_timeout() - - self.assertTrue(res) - - @mock.patch('senlin.engine.actions.base.wallclock', mock.MagicMock( - return_value=10)) - def test_is_timeout_non_default(self, mock_load): - node = mock.Mock(id='NODE_1') - cluster = mock.Mock(id='CID', nodes=[node], DELETING='DELETING') - cluster.do_delete.return_value = False - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.start_time = 0 - action.timeout = 5 - - # do it - res = action.is_timeout(20) - - self.assertEqual(False, res) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_remove_nodes_normally(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100) - 
mock_load.return_value = cluster - - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.inputs = {'destroy_after_deletion': False} - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2'] - - # do it - res_code, res_msg = action._remove_nodes_normally('NODE_REMOVE', - ['NODE_1', 'NODE_2']) - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - self.assertEqual(2, mock_action.call_count) - update_calls = [ - mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}), - mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'}) - ] - mock_update.assert_has_calls(update_calls) - self.assertEqual(1, mock_dep.call_count) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with() - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(no.Node, 'get') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_remove_nodes_with_hook(self, mock_wait, mock_start, mock_post, - mock_dep, mock_node_get, mock_action, - mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'hooks': { - 'timeout': 10, - 'type': 'zaqar', - 'params': { - 'queue': 'myqueue' - } - } - } - action.owner = 'OWNER_ID' - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.return_value = 'NODE_ACTION_ID' - mock_node_get.return_value = mock.Mock( - status=consts.NS_ACTIVE, id='NODE_ID', physical_id="nova-server") - # do it - res_code, res_msg = action._remove_nodes_with_hook( - 'NODE_DELETE', ['NODE_ID'], action.data['hooks']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - self.assertEqual(1, mock_dep.call_count) - mock_action.assert_called_once_with( - action.context, 'NODE_ID', 'NODE_DELETE', - name='node_delete_NODE_ID', cluster_id='CLUSTER_ID', - cause='Derived Action with Lifecycle Hook', inputs={}) - update_calls = [ - mock.call(action.context, 'NODE_ACTION_ID', - {'status': 'WAITING_LIFECYCLE_COMPLETION', - 'owner': 'OWNER_ID'}), - ] - mock_update.assert_has_calls(update_calls) - mock_post.assert_called_once_with('NODE_ACTION_ID', 'NODE_ID', - 'nova-server', - consts.LIFECYCLE_NODE_TERMINATION) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with(action.data['hooks']['timeout']) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_remove_nodes_normally_failed_wait(self, mock_wait, mock_start, - mock_dep, mock_action, - mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='ID', config={}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - 
action.inputs = {'destroy_after_deletion': False} - action.data = {} - mock_wait.return_value = (action.RES_TIMEOUT, 'Timeout!') - mock_action.return_value = 'NODE_ACTION_ID' - - # do it - res_code, res_msg = action._remove_nodes_normally('NODE_REMOVE', - ['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_TIMEOUT, res_code) - self.assertEqual('Timeout!', res_msg) - self.assertEqual({}, action.data) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_remove_nodes_hook_failed_wait(self, mock_wait, mock_start, - mock_dep, mock_action, - mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='ID', config={}) - mock_load.return_value = cluster - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.inputs = {'destroy_after_deletion': False} - action.data = { - 'hooks': { - 'timeout': 10, - 'type': 'zaqar', - 'params': { - 'queue': 'myqueue' - } - } - } - mock_wait.return_value = (action.RES_TIMEOUT, 'Timeout!') - mock_action.return_value = 'NODE_ACTION_ID' - - # do it - res_code, res_msg = action._remove_nodes_normally('NODE_REMOVE', - ['NODE_ID']) - - # assertions (other assertions are skipped) - self.assertEqual(action.RES_TIMEOUT, res_code) - self.assertEqual('Timeout!', res_msg) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(no.Node, 'get') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_delete_nodes_with_error_nodes(self, mock_wait, mock_start, - mock_post, mock_dep, - mock_node_get, mock_action, - mock_update, mock_load): - # prepare mocks - cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100) - mock_load.return_value = cluster - - # cluster action is real - action = ca.ClusterAction(cluster.id, 'CLUSTER_DELETE', self.ctx) - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'hooks': { - 'timeout': 10, - 'type': 'zaqar', - 'params': { - 'queue': 'myqueue' - } - } - } - action.owner = 'OWNER_ID' - mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2'] - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - node1 = mock.Mock(status=consts.NS_ACTIVE, id='NODE_1', - physical_id=None) - node2 = mock.Mock(status=consts.NS_ACTIVE, id='NODE_2', - physical_id="nova-server-1") - mock_node_get.side_effect = [node1, node2] - # do it - res_code, res_msg = action._remove_nodes_with_hook( - 'NODE_DELETE', ['NODE_1', 'NODE_2'], action.data['hooks']) - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('All dependents completed', res_msg) - update_calls = [ - mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY', - 'owner': None}), - mock.call(action.context, 'NODE_ACTION_2', - {'status': 'WAITING_LIFECYCLE_COMPLETION', - 'owner': 'OWNER_ID'}) - ] - mock_update.assert_has_calls(update_calls) - create_actions = [ - mock.call(action.context, 'NODE_1', 'NODE_DELETE', - name='node_delete_NODE_1', - cluster_id='CLUSTER_ID', - cause='Derived Action with Lifecycle Hook', inputs={}), - mock.call(action.context, 'NODE_2', 'NODE_DELETE', - name='node_delete_NODE_2', - 
cluster_id='CLUSTER_ID', - cause='Derived Action with Lifecycle Hook', inputs={}) - ] - mock_action.assert_has_calls(create_actions) - - mock_post.assert_called_once_with('NODE_ACTION_2', 'NODE_2', - node2.physical_id, - consts.LIFECYCLE_NODE_TERMINATION) - mock_start.assert_called_once_with() - mock_wait.assert_called_once_with(action.data['hooks']['timeout']) - - self.assertEqual(1, mock_dep.call_count) diff --git a/senlin/tests/unit/engine/actions/test_node_action.py b/senlin/tests/unit/engine/actions/test_node_action.py deleted file mode 100644 index 88406ba2e..000000000 --- a/senlin/tests/unit/engine/actions/test_node_action.py +++ /dev/null @@ -1,944 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -from unittest import mock - -from senlin.common import consts -from senlin.common import scaleutils -from senlin.engine.actions import base as base_action -from senlin.engine.actions import node_action -from senlin.engine import cluster as cluster_mod -from senlin.engine import event as EVENT -from senlin.engine import node as node_mod -from senlin.engine import senlin_lock as lock -from senlin.objects import node as node_obj -from senlin.policies import base as policy_mod -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(node_mod.Node, 'load') -class NodeActionTest(base.SenlinTestCase): - - def setUp(self): - super(NodeActionTest, self).setUp() - self.ctx = utils.dummy_context() - - def test_do_create_okay(self, mock_load): - node = mock.Mock(id='NID') - node.do_create = mock.Mock(return_value=[True, '']) - mock_load.return_value = node - action = node_action.NodeAction(node.id, 'ACTION', self.ctx) - - res_code, res_msg = action.do_create() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Node created successfully.', res_msg) - node.do_create.assert_called_once_with(action.context) - - def test_do_create_failed(self, mock_load): - node = mock.Mock(id='NID') - node.do_create = mock.Mock(return_value=[False, - 'custom error message']) - mock_load.return_value = node - action = node_action.NodeAction(node.id, 'ACTION', self.ctx) - - # Test node creation failure path - res_code, res_msg = action.do_create() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('custom error message', res_msg) - node.do_create.assert_called_once_with(action.context) - - @mock.patch.object(scaleutils, 'check_size_params') - @mock.patch.object(node_obj.Node, 'count_by_cluster') - @mock.patch.object(cluster_mod.Cluster, 'load') - def test_do_create_with_cluster_id_success(self, mock_c_load, mock_count, - mock_check, mock_load): - cluster = mock.Mock(id='CID') - mock_c_load.return_value = cluster - node = mock.Mock(id='NID', cluster_id='CID') - node.do_create = mock.Mock(return_value=[True, '']) - mock_load.return_value = node - mock_count.return_value = 11 - mock_check.return_value = None - action = node_action.NodeAction(node.id, 'ACTION', self.ctx, - cause=consts.CAUSE_RPC) - - # do it - res_code, 
res_msg = action.do_create()
-
-        # assertions
-        self.assertEqual(action.RES_OK, res_code)
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 11, None, None, True)
-        node.do_create.assert_called_once_with(action.context)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.NODE_CREATE, desired_capacity=11)
-
-    @mock.patch.object(node_obj.Node, 'update')
-    @mock.patch.object(scaleutils, 'check_size_params')
-    @mock.patch.object(node_obj.Node, 'count_by_cluster')
-    @mock.patch.object(cluster_mod.Cluster, 'load')
-    def test_do_create_with_cluster_id_failed_checking(
-            self, mock_c_load, mock_count, mock_check, mock_update, mock_load):
-
-        cluster = mock.Mock(id='CID')
-        mock_c_load.return_value = cluster
-        node = mock.Mock(id='NID', cluster_id='CID')
-        node.do_create = mock.Mock(return_value=[True, ''])
-        mock_load.return_value = node
-        mock_count.return_value = 11
-        mock_check.return_value = 'overflow'
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        cause=consts.CAUSE_RPC)
-
-        # do it
-        res_code, res_msg = action.do_create()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('overflow', res_msg)
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 11, None, None, True)
-        mock_update.assert_called_once_with(action.context, 'NID',
-                                            {'cluster_id': '',
-                                             'status': consts.NS_ERROR})
-        self.assertEqual(0, node.do_create.call_count)
-        self.assertEqual(0, cluster.eval_status.call_count)
-
-    @mock.patch.object(scaleutils, 'check_size_params')
-    @mock.patch.object(node_obj.Node, 'count_by_cluster')
-    @mock.patch.object(cluster_mod.Cluster, 'load')
-    def test_do_create_with_cluster_id_failed_creation(
-            self, mock_c_load, mock_count, mock_check, mock_load):
-
-        cluster = mock.Mock(id='CID')
-        mock_c_load.return_value = cluster
-        node = mock.Mock(id='NID', cluster_id='CID')
-        node.do_create = mock.Mock(return_value=[False,
-                                                 'custom error message'])
-        mock_load.return_value = node
-        mock_count.return_value = 11
-        mock_check.return_value = ''
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        cause=consts.CAUSE_RPC)
-
-        # do it
-        res_code, res_msg = action.do_create()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('custom error message', res_msg)
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 11, None, None, True)
-        node.do_create.assert_called_once_with(action.context)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.NODE_CREATE, desired_capacity=11)
-
-    def test_do_delete_okay(self, mock_load):
-        node = mock.Mock(id='NID')
-        node.do_delete = mock.Mock(return_value=True)
-        mock_load.return_value = node
-        action = node_action.NodeAction('ID', 'ACTION', self.ctx)
-
-        # do it
-        res_code, res_msg = action.do_delete()
-
-        # assertions
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node deleted successfully.', res_msg)
-        node.do_delete.assert_called_once_with(action.context)
-
-    def test_do_delete_failed(self, mock_load):
-        node = mock.Mock(id='NID')
-        node.do_delete = mock.Mock(return_value=False)
-        mock_load.return_value = node
-        action = node_action.NodeAction('ID', 'ACTION', self.ctx)
-
-        # Test failed node deletion path
-        res_code, res_msg = action.do_delete()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node deletion failed.', res_msg)
-        node.do_delete.assert_called_once_with(action.context)
-
-    @mock.patch.object(scaleutils, 'check_size_params')
-    @mock.patch.object(node_obj.Node, 'count_by_cluster')
-    @mock.patch.object(cluster_mod.Cluster, 'load')
-    def test_do_delete_with_cluster_id_success(self, mock_c_load, mock_count,
-                                               mock_check, mock_load):
-        cluster = mock.Mock(id='CID')
-        mock_c_load.return_value = cluster
-        node = mock.Mock(id='NID', cluster_id='CID')
-        node.do_delete.return_value = True
-        mock_load.return_value = node
-        mock_count.return_value = 2
-        mock_check.return_value = None
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        cause=consts.CAUSE_RPC)
-
-        # do it
-        res_code, res_msg = action.do_delete()
-
-        # assertion
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node deleted successfully.', res_msg)
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 1, None, None, True)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.NODE_DELETE, desired_capacity=1)
-
-    @mock.patch.object(scaleutils, 'check_size_params')
-    @mock.patch.object(node_obj.Node, 'count_by_cluster')
-    @mock.patch.object(cluster_mod.Cluster, 'load')
-    def test_do_delete_with_cluster_id_failed_checking(
-            self, mock_c_load, mock_count, mock_check, mock_load):
-
-        cluster = mock.Mock(id='CID')
-        mock_c_load.return_value = cluster
-        node = mock.Mock(id='NID', cluster_id='CID')
-        node.do_delete.return_value = True
-        mock_load.return_value = node
-        mock_count.return_value = 2
-        mock_check.return_value = 'underflow'
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        cause=consts.CAUSE_RPC)
-
-        res_code, res_msg = action.do_delete()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('underflow', res_msg)
-
-        mock_load.assert_called_once_with(action.context, node_id='NID')
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 1, None, None, True)
-        self.assertEqual(0, node.do_delete.call_count)
-        self.assertEqual(0, cluster.eval_status.call_count)
-
-    @mock.patch.object(scaleutils, 'check_size_params')
-    @mock.patch.object(node_obj.Node, 'count_by_cluster')
-    @mock.patch.object(cluster_mod.Cluster, 'load')
-    def test_do_delete_with_cluster_id_failed_deletion(
-            self, mock_c_load, mock_count, mock_check, mock_load):
-
-        cluster = mock.Mock(id='CID')
-        mock_c_load.return_value = cluster
-        node = mock.Mock(id='NID', cluster_id='CID')
-        node.do_delete.return_value = False
-        mock_load.return_value = node
-        mock_count.return_value = 2
-        mock_check.return_value = None
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        cause=consts.CAUSE_RPC)
-
-        res_code, res_msg = action.do_delete()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node deletion failed.', res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NID')
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 1, None, None, True)
-        node.do_delete.assert_called_once_with(action.context)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.NODE_DELETE)
-
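Taken together, the do_delete tests above pin down an ordering contract: for a cluster-scoped delete arriving over RPC, the post-delete size is validated before the node is touched, and cluster status is re-evaluated afterwards. A minimal sketch of that flow follows; it is illustrative only, using stand-in names for the helpers the mocks patch (check_size_params returns a falsy value on success and a reason string such as 'underflow' on failure):

    # Illustrative sketch only -- not the retired Senlin implementation.
    import time

    RES_OK, RES_ERROR = 'OK', 'ERROR'

    def do_delete(action, node, cluster, count_nodes, check_size_params):
        count = count_nodes(action.context, cluster.id)
        # Validate the post-delete size before touching the node.
        reason = check_size_params(cluster, count - 1, None, None, True)
        if reason:                      # e.g. 'underflow'
            return RES_ERROR, reason
        grace = action.data.get('deletion', {}).get('grace_period')
        if grace:
            time.sleep(grace)           # the tests patch eventlet.sleep
        if not node.do_delete(action.context):
            return RES_ERROR, 'Node deletion failed.'
        cluster.eval_status(action.context, 'NODE_DELETE',
                            desired_capacity=count - 1)
        return RES_OK, 'Node deleted successfully.'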
-    @mock.patch.object(eventlet, 'sleep')
-    @mock.patch.object(scaleutils, 'check_size_params')
-    @mock.patch.object(node_obj.Node, 'count_by_cluster')
-    @mock.patch.object(cluster_mod.Cluster, 'load')
-    def test_do_delete_with_cluster_id_and_grace_period(
-            self, mock_c_load, mock_count, mock_check, mock_sleep, mock_load):
-
-        cluster = mock.Mock(id='CID')
-        mock_c_load.return_value = cluster
-        node = mock.Mock(id='NID', cluster_id='CID')
-        node.do_delete.return_value = True
-        mock_load.return_value = node
-        mock_count.return_value = 2
-        mock_check.return_value = None
-        action = node_action.NodeAction(
-            node.id, 'ACTION', self.ctx, cause=consts.CAUSE_RPC,
-            data={'deletion': {'grace_period': 10}})
-
-        # do it
-        res_code, res_msg = action.do_delete()
-
-        # assertions
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node deleted successfully.', res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NID')
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 1, None, None, True)
-        mock_sleep.assert_called_once_with(10)
-        node.do_delete.assert_called_once_with(action.context)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.NODE_DELETE, desired_capacity=1)
-
-    @mock.patch.object(eventlet, 'sleep')
-    @mock.patch.object(scaleutils, 'check_size_params')
-    @mock.patch.object(node_obj.Node, 'count_by_cluster')
-    @mock.patch.object(cluster_mod.Cluster, 'load')
-    def test_do_delete_with_cluster_id_and_forced_reduce(
-            self, mock_c_load, mock_count, mock_check, mock_sleep, mock_load):
-        cluster = mock.Mock(id='CID')
-        mock_c_load.return_value = cluster
-        node = mock.Mock(id='NID', cluster_id='CID')
-        node.do_delete.return_value = True
-        mock_load.return_value = node
-        mock_count.return_value = 2
-        mock_check.return_value = None
-        action = node_action.NodeAction(
-            'NID', 'ACTION', self.ctx,
-            cause=consts.CAUSE_RPC,
-            data={'deletion': {'reduce_desired_capacity': True}})
-
-        # do it
-        res_code, res_msg = action.do_delete()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node deleted successfully.', res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NID')
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 1, None, None, True)
-        node.do_delete.assert_called_once_with(action.context)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.NODE_DELETE, desired_capacity=1)
-
-    @mock.patch.object(eventlet, 'sleep')
-    @mock.patch.object(scaleutils, 'check_size_params')
-    @mock.patch.object(node_obj.Node, 'count_by_cluster')
-    @mock.patch.object(cluster_mod.Cluster, 'load')
-    def test_do_delete_with_cluster_id_and_forced_no_reduce(
-            self, mock_c_load, mock_count, mock_check, mock_sleep, mock_load):
-        cluster = mock.Mock(id='CID')
-        mock_c_load.return_value = cluster
-        node = mock.Mock(id='NID', cluster_id='CID')
-        node.do_delete.return_value = True
-        mock_load.return_value = node
-        mock_count.return_value = 2
-        mock_check.return_value = None
-        action = node_action.NodeAction(
-            'NID', 'ACTION', self.ctx,
-            cause=consts.CAUSE_RPC,
-            data={'deletion': {'reduce_desired_capacity': False}})
-
-        # do it
-        res_code, res_msg = action.do_delete()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node deleted successfully.', res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NID')
-        mock_c_load.assert_called_once_with(action.context, 'CID')
-        mock_count.assert_called_once_with(action.context, 'CID')
-        mock_check.assert_called_once_with(cluster, 1, None, None, True)
-        node.do_delete.assert_called_once_with(action.context)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.NODE_DELETE)
-
-    def test_do_delete_derived_success(self, mock_load):
-
-        node = mock.Mock(id='NID', cluster_id='CLUSTER_ID')
-        node.do_delete.return_value = True
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        cause=consts.CAUSE_DERIVED)
-
-        res_code, res_msg = action.do_delete()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node deleted successfully.', res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NID')
-
-    def test_do_delete_derived_failed_deletion(self, mock_load):
-
-        node = mock.Mock(id='NID', cluster_id='CLUSTER_ID')
-        node.do_delete.return_value = False
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        cause=consts.CAUSE_DERIVED)
-
-        res_code, res_msg = action.do_delete()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node deletion failed.', res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NID')
-
-    def test_do_update(self, mock_load):
-        node = mock.Mock()
-        node.id = 'NID'
-        mock_load.return_value = node
-        inputs = {"new_profile_id": "FAKE_PROFILE_ID"}
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        inputs=inputs)
-
-        # Test failed node update path
-        node.do_update = mock.Mock(return_value=None)
-        res_code, res_msg = action.do_update()
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node update failed.', res_msg)
-        node.do_update.assert_called_once_with(action.context, inputs)
-        node.reset_mock()
-
-        # Test node update success path
-        node.do_update = mock.Mock(return_value=mock.Mock())
-        res_code, res_msg = action.do_update()
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node updated successfully.', res_msg)
-        node.do_update.assert_called_once_with(action.context, inputs)
-
-    def test_do_update_no_need_update(self, mock_load):
-        node = mock.Mock()
-        node.id = 'NID'
-        node.profile_id = 'PROFILE_ID'
-        mock_load.return_value = node
-        inputs = {"new_profile_id": "PROFILE_ID"}
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx,
-                                        inputs=inputs)
-
-        # Test no-op update path: requested profile matches the current one
-        node.do_update = mock.Mock(return_value=mock.Mock())
-        res_code, res_msg = action.do_update()
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('No property to update.', res_msg)
-        self.assertFalse(node.do_update.called)
-
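The no-op case just above is worth calling out: when the requested profile equals the node's current one, do_update must short-circuit without ever invoking the node-level handler. Roughly, and with invented helper names:

    # Hypothetical guard mirroring test_do_update_no_need_update.
    def do_update(action, node):
        new_profile = action.inputs.get('new_profile_id')
        if new_profile and new_profile == node.profile_id:
            # Nothing would change; never call node.do_update().
            return 'OK', 'No property to update.'
        if node.do_update(action.context, action.inputs) is None:
            return 'ERROR', 'Node update failed.'  # tests use None as failure
        return 'OK', 'Node updated successfully.'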
-    def test_do_join_success(self, mock_load):
-        node = mock.Mock(id='NID')
-        mock_load.return_value = node
-        inputs = {"cluster_id": "FAKE_ID"}
-        action = node_action.NodeAction(node.id, 'NODE_JOIN', self.ctx,
-                                        inputs=inputs)
-        node.do_join = mock.Mock(return_value=True)
-
-        # Test successful node join path
-        res_code, res_msg = action.do_join()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node successfully joined cluster.', res_msg)
-        node.do_join.assert_called_once_with(action.context, 'FAKE_ID')
-
-    def test_do_join_failed_do_join(self, mock_load):
-        node = mock.Mock(id='NID')
-        mock_load.return_value = node
-        inputs = {"cluster_id": "FAKE_ID"}
-        action = node_action.NodeAction(node.id, 'NODE_JOIN', self.ctx,
-                                        inputs=inputs)
-        node.do_join = mock.Mock(return_value=False)
-
-        # Test failed node join path
-        res_code, res_msg = action.do_join()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node failed in joining cluster.', res_msg)
-        node.do_join.assert_called_once_with(action.context, 'FAKE_ID')
-
-    def test_do_leave_success(self, mock_load):
-        node = mock.Mock(id='NID', cluster_id='CID')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'NODE_LEAVE', self.ctx)
-        node.do_leave = mock.Mock(return_value=True)
-
-        # Test successful node leave path
-        res_code, res_msg = action.do_leave()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node successfully left cluster.', res_msg)
-        node.do_leave.assert_called_once_with(action.context)
-
-    def test_do_leave_failed_leave(self, mock_load):
-        node = mock.Mock(id='NID', cluster_id='CID')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'NODE_LEAVE', self.ctx)
-        node.do_leave = mock.Mock(return_value=False)
-
-        # Test failed node leave path
-        res_code, res_msg = action.do_leave()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node failed in leaving cluster.', res_msg)
-        node.do_leave.assert_called_once_with(action.context)
-
-    def test_do_check_success(self, mock_load):
-        node = mock.Mock(id='NID')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx)
-        node.do_check = mock.Mock(return_value=True)
-
-        res_code, res_msg = action.do_check()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node check succeeded.', res_msg)
-        node.do_check.assert_called_once_with(action.context)
-
-    def test_do_check_failed(self, mock_load):
-        node = mock.Mock(id='NID')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx)
-        node.do_check = mock.Mock(return_value=False)
-
-        res_code, res_msg = action.do_check()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node check failed.', res_msg)
-        node.do_check.assert_called_once_with(action.context)
-
-    def test_do_recover_success(self, mock_load):
-        node = mock.Mock(id='NID')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx)
-        action.inputs = {'operation': ['SWIM', 'DANCE']}
-        node.do_recover = mock.Mock(return_value=True)
-
-        res_code, res_msg = action.do_recover()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Node recovered successfully.', res_msg)
-        node.do_recover.assert_called_once_with(action.context, action)
-
-    def test_do_recover_failed(self, mock_load):
-        node = mock.Mock(id='NID')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx)
-        action.inputs = {'operation': ['SWIM', 'DANCE']}
-
-        # Test node recover failure path
-        node.do_recover = mock.Mock(return_value=False)
-        res_code, res_msg = action.do_recover()
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node recover failed.', res_msg)
-        node.do_recover.assert_called_once_with(action.context, action)
-
-    def test_do_operation_success(self, mock_load):
-        node = mock.Mock(id='NID')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx)
-        action.inputs = {'operation': 'dance', 'params': {}}
-        node.do_operation = mock.Mock(return_value=True)
-
-        res_code, res_msg = action.do_operation()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual("Node operation 'dance' succeeded.", res_msg)
-        node.do_operation.assert_called_once_with(action.context,
-                                                  operation='dance',
-                                                  params={})
-
-    def test_do_operation_failed(self, mock_load):
-        node = mock.Mock(id='NID')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'ACTION', self.ctx)
-        action.inputs = {'operation': 'dance', 'params': {}}
-        node.do_operation = mock.Mock(return_value=False)
-
-        res_code, res_msg = action.do_operation()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual("Node operation 'dance' failed.", res_msg)
-        node.do_operation.assert_called_once_with(action.context,
-                                                  operation='dance',
-                                                  params={})
-
-    def test_execute(self, mock_load):
-        node = mock.Mock()
-        node.id = 'NID'
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'NODE_SING', self.ctx)
-        action.do_sing = mock.Mock(return_value=(action.RES_OK, 'GOOD'))
-
-        res_code, res_msg = action._execute()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('GOOD', res_msg)
-        action.do_sing.assert_called_once_with()
-
-    @mock.patch.object(EVENT, 'error')
-    def test_execute_bad_action(self, mock_error, mock_load):
-        node = mock.Mock()
-        node.id = 'NID'
-        mock_load.return_value = node
-        action = node_action.NodeAction(node.id, 'NODE_DANCE', self.ctx)
-
-        res_code, res_msg = action._execute()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        reason = 'Unsupported action: NODE_DANCE'
-        self.assertEqual(reason, res_msg)
-        mock_error.assert_called_once_with(action, 'error', reason)
-
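test_execute and test_execute_bad_action above imply that _execute resolves its handler from the action name at runtime ('NODE_SING' maps to do_sing) and reports unsupported names. A plausible shape for that dispatch, with the 'NODE_' prefix convention inferred from the test data rather than taken from the retired code:

    # Sketch of name-based dispatch; names inferred from the tests above.
    class ActionSketch:
        RES_OK, RES_ERROR = 'OK', 'ERROR'

        def __init__(self, name):
            self.action = name          # e.g. 'NODE_SING'

        def _execute(self):
            # Strip the 'NODE_' prefix, lower-case, look up do_<verb>().
            handler = getattr(self, 'do_' + self.action[5:].lower(), None)
            if handler is None:
                return self.RES_ERROR, 'Unsupported action: %s' % self.action
            return handler()

        def do_sing(self):
            return self.RES_OK, 'GOOD'

    assert ActionSketch('NODE_SING')._execute() == ('OK', 'GOOD')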
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    def test_execute_failed_lock_cluster(self, mock_acquire, mock_load):
-        node = mock.Mock()
-        node.cluster_id = 'FAKE_CLUSTER'
-        node.id = 'NID'
-        mock_load.return_value = node
-        action = node_action.NodeAction('NODE_ID', 'NODE_FLY', self.ctx,
-                                        cause='RPC Request')
-        action.id = 'ACTION_ID'
-        mock_acquire.return_value = None
-
-        res_code, res_msg = action.execute()
-
-        reason = 'Failed in locking cluster'
-        self.assertEqual(action.RES_RETRY, res_code)
-        self.assertEqual(reason, res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NODE_ID')
-        mock_acquire.assert_called_once_with(self.ctx, 'FAKE_CLUSTER',
-                                             'ACTION_ID', None,
-                                             lock.NODE_SCOPE, False)
-
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    @mock.patch.object(lock, 'cluster_lock_release')
-    @mock.patch.object(base_action.Action, 'policy_check')
-    def test_execute_failed_policy_check(self, mock_check, mock_release,
-                                         mock_acquire, mock_load):
-        node = mock.Mock()
-        node.id = 'NID'
-        node.cluster_id = 'FAKE_CLUSTER'
-        mock_load.return_value = node
-
-        action = node_action.NodeAction('NODE_ID', 'NODE_FLY', self.ctx,
-                                        cause='RPC Request')
-        action.id = 'ACTION_ID'
-        action.data = {
-            'status': policy_mod.CHECK_ERROR,
-            'reason': 'Failed policy checking'
-        }
-        mock_acquire.return_value = action.id
-
-        res_code, res_msg = action.execute()
-
-        reason = 'Policy check: Failed policy checking'
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual(reason, res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NODE_ID')
-        mock_acquire.assert_called_once_with(self.ctx, 'FAKE_CLUSTER',
-                                             'ACTION_ID', None,
-                                             lock.NODE_SCOPE, False)
-        mock_release.assert_called_once_with('FAKE_CLUSTER', 'ACTION_ID',
-                                             lock.NODE_SCOPE)
-        mock_check.assert_called_once_with('FAKE_CLUSTER', 'BEFORE')
-
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    @mock.patch.object(lock, 'cluster_lock_release')
-    @mock.patch.object(base_action.Action, 'policy_check')
-    def test_execute_policy_check_exception(self, mock_check, mock_release,
-                                            mock_acquire, mock_load):
-        node = mock.Mock()
-        node.id = 'NID'
-        node.cluster_id = 'FAKE_CLUSTER'
-        mock_load.return_value = node
-
-        action = node_action.NodeAction('NODE_ID', 'NODE_FLY', self.ctx,
-                                        cause='RPC Request')
-        action.id = 'ACTION_ID'
-        action.data = {
-            'status': policy_mod.CHECK_NONE,
-            'reason': ''
-        }
-        mock_acquire.return_value = action.id
-
-        mock_check.side_effect = Exception('error')
-
-        res_code, res_msg = action.execute()
-
-        reason = 'Policy check: '
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual(reason, res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NODE_ID')
-        mock_acquire.assert_called_once_with(self.ctx, 'FAKE_CLUSTER',
-                                             'ACTION_ID', None,
-                                             lock.NODE_SCOPE, False)
-        mock_release.assert_called_once_with('FAKE_CLUSTER', 'ACTION_ID',
-                                             lock.NODE_SCOPE)
-        mock_check.assert_called_once_with('FAKE_CLUSTER', 'BEFORE')
-
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    @mock.patch.object(lock, 'cluster_lock_release')
-    @mock.patch.object(lock, 'node_lock_acquire')
-    @mock.patch.object(lock, 'node_lock_release')
-    @mock.patch.object(base_action.Action, 'policy_check')
-    def test_execute_no_policy_check(self, mock_check,
-                                     mock_nl_release, mock_nl_acquire,
-                                     mock_cl_release, mock_cl_acquire,
-                                     mock_load):
-        node_id = 'NODE_ID'
-        node = mock.Mock(id=node_id, cluster_id='FAKE_CLUSTER')
-        mock_load.return_value = node
-        action = node_action.NodeAction(node_id, 'NODE_FLY', self.ctx,
-                                        cause=consts.CAUSE_DERIVED)
-        action.id = 'ACTION_ID'
-        action.owner = 'OWNER'
-        mock_exec = self.patchobject(action, '_execute',
-                                     return_value=(action.RES_OK, 'Good'))
-        mock_nl_acquire.return_value = action.id
-
-        res_code, res_msg = action.execute()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Good', res_msg)
-        mock_load.assert_called_once_with(action.context, node_id=node_id)
-        self.assertEqual(0, mock_cl_acquire.call_count)
-        self.assertEqual(0, mock_cl_release.call_count)
-        mock_nl_acquire.assert_called_once_with(self.ctx, node_id,
-                                                action.id, action.owner,
-                                                False)
-        mock_nl_release.assert_called_once_with(node_id, action.id)
-        mock_exec.assert_called_once_with()
-        self.assertEqual(0, mock_check.call_count)
-
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    @mock.patch.object(lock, 'cluster_lock_release')
-    @mock.patch.object(base_action.Action, 'policy_check')
-    @mock.patch.object(lock, 'node_lock_acquire')
-    @mock.patch.object(lock, 'node_lock_release')
-    def test_execute_failed_locking_node(self, mock_release_node,
-                                         mock_acquire_node, mock_check,
-                                         mock_release, mock_acquire,
-                                         mock_load):
-        node = mock.Mock()
-        node.cluster_id = 'FAKE_CLUSTER'
-        node.id = 'NODE_ID'
-        mock_load.return_value = node
-
-        action = node_action.NodeAction('NODE_ID', 'NODE_FLY', self.ctx,
-                                        cause='RPC Request')
-        action.id = 'ACTION_ID'
-        action.data = {
-            'status': policy_mod.CHECK_OK,
-            'reason': 'Policy checking passed'
-        }
-        mock_acquire.return_value = 'ACTION_ID'
-        mock_acquire_node.return_value = None
-
-        res_code, res_msg = action.execute()
-
-        reason = 'Failed in locking node'
-        self.assertEqual(action.RES_RETRY, res_code)
-        self.assertEqual(reason, res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NODE_ID')
-        mock_acquire.assert_called_once_with(self.ctx, 'FAKE_CLUSTER',
-                                             'ACTION_ID', None,
-                                             lock.NODE_SCOPE, False)
-        mock_release.assert_called_once_with('FAKE_CLUSTER', 'ACTION_ID',
-                                             lock.NODE_SCOPE)
-        mock_check.assert_called_once_with('FAKE_CLUSTER', 'BEFORE')
-        mock_acquire_node.assert_called_once_with(self.ctx, 'NODE_ID',
-                                                  'ACTION_ID', None, False)
-        mock_release_node.assert_called_once_with('NODE_ID', 'ACTION_ID')
-
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    @mock.patch.object(lock, 'cluster_lock_release')
-    @mock.patch.object(base_action.Action, 'policy_check')
-    @mock.patch.object(lock, 'node_lock_acquire')
-    @mock.patch.object(lock, 'node_lock_release')
-    def test_execute_success_stealing_node_lock(self, mock_release_node,
-                                                mock_acquire_node, mock_check,
-                                                mock_release, mock_acquire,
-                                                mock_load):
-        node = mock.Mock()
-        node.cluster_id = 'FAKE_CLUSTER'
-        node.id = 'NODE_ID'
-        mock_load.return_value = node
-
-        action = node_action.NodeAction('NODE_ID', 'NODE_OPERATION', self.ctx,
-                                        cause='RPC Request')
-        action.id = 'ACTION_ID'
-        action.data = {
-            'status': policy_mod.CHECK_OK,
-            'reason': 'Policy checking passed'
-        }
-        action.inputs = {'operation': 'stop', 'params': {}}
-
-        mock_acquire.return_value = 'ACTION_ID'
-        mock_acquire_node.return_value = True
-
-        res_code, res_msg = action.execute()
-
-        reason = "Node operation 'stop' succeeded."
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual(reason, res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NODE_ID')
-        mock_acquire.assert_called_once_with(self.ctx, 'FAKE_CLUSTER',
-                                             'ACTION_ID', None,
-                                             lock.NODE_SCOPE, False)
-        policy_calls = [
-            mock.call('FAKE_CLUSTER', 'BEFORE'),
-            mock.call('FAKE_CLUSTER', 'AFTER')
-        ]
-        mock_release.assert_called_once_with('FAKE_CLUSTER', 'ACTION_ID',
-                                             lock.NODE_SCOPE)
-        mock_check.assert_has_calls(policy_calls)
-        mock_acquire_node.assert_called_once_with(self.ctx, 'NODE_ID',
-                                                  'ACTION_ID', None, True)
-        mock_release_node.assert_called_once_with('NODE_ID', 'ACTION_ID')
-
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    @mock.patch.object(lock, 'cluster_lock_release')
-    @mock.patch.object(base_action.Action, 'policy_check')
-    @mock.patch.object(lock, 'node_lock_acquire')
-    @mock.patch.object(lock, 'node_lock_release')
-    def test_execute_success(self, mock_release_node, mock_acquire_node,
-                             mock_check, mock_release, mock_acquire,
-                             mock_load):
-        def fake_execute():
-            node.cluster_id = ''
-            return (action.RES_OK, 'Execution ok')
-
-        node = mock.Mock()
-        node.cluster_id = 'FAKE_CLUSTER'
-        node.id = 'NODE_ID'
-        mock_load.return_value = node
-
-        action = node_action.NodeAction(node.id, 'NODE_FLY', self.ctx,
-                                        cause='RPC Request')
-        action.id = 'ACTION_ID'
-        # check result
-        action.data = {
-            'status': policy_mod.CHECK_OK,
-            'reason': 'Policy checking passed'
-        }
-        self.patchobject(action, '_execute', side_effect=fake_execute)
-        mock_acquire.return_value = 'ACTION_ID'
-        mock_acquire_node.return_value = 'ACTION_ID'
-
-        res_code, res_msg = action.execute()
-
-        reason = 'Execution ok'
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual(reason, res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NODE_ID')
-        mock_acquire.assert_called_once_with(self.ctx, 'FAKE_CLUSTER',
-                                             'ACTION_ID', None,
-                                             lock.NODE_SCOPE, False)
-        mock_release.assert_called_once_with('FAKE_CLUSTER', 'ACTION_ID',
-                                             lock.NODE_SCOPE)
-        mock_acquire_node.assert_called_once_with(self.ctx, 'NODE_ID',
-                                                  'ACTION_ID', None, False)
-        mock_release_node.assert_called_once_with('NODE_ID', 'ACTION_ID')
-        check_calls = [
-            mock.call('FAKE_CLUSTER', 'BEFORE'),
-            mock.call('FAKE_CLUSTER', 'AFTER')
-        ]
-        mock_check.assert_has_calls(check_calls)
-
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    @mock.patch.object(lock, 'cluster_lock_release')
-    @mock.patch.object(base_action.Action, 'policy_check')
-    @mock.patch.object(lock, 'node_lock_acquire')
-    @mock.patch.object(lock, 'node_lock_release')
-    def test_execute_failed_execute(self, mock_release_node, mock_acquire_node,
-                                    mock_check, mock_release, mock_acquire,
-                                    mock_load):
-        node = mock.Mock()
-        node.cluster_id = 'FAKE_CLUSTER'
-        node.id = 'NODE_ID'
-        mock_load.return_value = node
-
-        action = node_action.NodeAction(node.id, 'NODE_FLY', self.ctx,
-                                        cause='RPC Request')
-        action.id = 'ACTION_ID'
-        # check result
-        action.data = {
-            'status': policy_mod.CHECK_OK,
-            'reason': 'Policy checking passed'
-        }
-        self.patchobject(action, '_execute',
-                         return_value=(action.RES_ERROR, 'Execution Failed'))
-        mock_acquire.return_value = 'ACTION_ID'
-        mock_acquire_node.return_value = 'ACTION_ID'
-
-        res_code, res_msg = action.execute()
-
-        reason = 'Execution Failed'
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual(reason, res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NODE_ID')
-        mock_acquire.assert_called_once_with(self.ctx, 'FAKE_CLUSTER',
-                                             'ACTION_ID', None,
-                                             lock.NODE_SCOPE, False)
-        mock_release.assert_called_once_with('FAKE_CLUSTER', 'ACTION_ID',
-                                             lock.NODE_SCOPE)
-        mock_acquire_node.assert_called_once_with(self.ctx, 'NODE_ID',
-                                                  'ACTION_ID', None, False)
-        mock_release_node.assert_called_once_with('NODE_ID', 'ACTION_ID')
-        check_calls = [
-            mock.call('FAKE_CLUSTER', 'BEFORE'),
-            mock.call('FAKE_CLUSTER', 'AFTER')
-        ]
-        mock_check.assert_has_calls(check_calls)
-
-    @mock.patch.object(lock, 'cluster_lock_acquire')
-    @mock.patch.object(lock, 'cluster_lock_release')
-    @mock.patch.object(lock, 'node_lock_acquire')
-    @mock.patch.object(lock, 'node_lock_release')
-    def test_execute_failed_post_check(self, mock_release_node,
-                                       mock_acquire_node,
-                                       mock_release, mock_acquire,
-                                       mock_load):
-
-        def fake_check(cluster_id, target):
-            if target == 'BEFORE':
-                action.data = {
-                    'status': policy_mod.CHECK_OK,
-                    'reason': 'Policy checking passed'
-                }
-            else:
-                action.data = {
-                    'status': policy_mod.CHECK_ERROR,
-                    'reason': 'Policy checking failed'
-                }
-
-        node = mock.Mock()
-        node.cluster_id = 'FAKE_CLUSTER'
-        node.id = 'NODE_ID'
-        mock_load.return_value = node
-
-        action = node_action.NodeAction('NODE_ID', 'NODE_FLY', self.ctx,
-                                        cause='RPC Request')
-        action.id = 'ACTION_ID'
-        mock_check = self.patchobject(action, 'policy_check',
-                                      side_effect=fake_check)
-        # check result
-        self.patchobject(action, '_execute',
-                         return_value=(action.RES_OK, 'Ignored'))
-        mock_acquire.return_value = 'ACTION_ID'
-        mock_acquire_node.return_value = 'ACTION_ID'
-
-        res_code, res_msg = action.execute()
-
-        reason = 'Policy check: Policy checking failed'
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual(reason, res_msg)
-        mock_load.assert_called_once_with(action.context, node_id='NODE_ID')
-        mock_acquire.assert_called_once_with(self.ctx, 'FAKE_CLUSTER',
-                                             'ACTION_ID', None,
-                                             lock.NODE_SCOPE, False)
-        mock_release.assert_called_once_with('FAKE_CLUSTER', 'ACTION_ID',
-                                             lock.NODE_SCOPE)
-        check_calls = [
-            mock.call('FAKE_CLUSTER', 'BEFORE'),
-            mock.call('FAKE_CLUSTER', 'AFTER')
-        ]
-        mock_check.assert_has_calls(check_calls)
-        mock_acquire_node.assert_called_once_with(self.ctx, 'NODE_ID',
-                                                  'ACTION_ID', None, False)
-        mock_release_node.assert_called_once_with('NODE_ID', 'ACTION_ID')
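The execute() tests above collectively describe an envelope around _execute(): cluster lock, BEFORE policy check, node lock, the action body, AFTER policy check, then release of both locks even on early exits. In outline, with every name a stand-in for the lock module and policy machinery the tests patch:

    # Sketch only; constants and helpers are invented for illustration.
    RES_OK, RES_ERROR, RES_RETRY = 'OK', 'ERROR', 'RETRY'

    def execute(action, lock):
        # `lock` stands in for the senlin.engine lock helpers.
        if not lock.cluster_acquire(action.cluster_id, action.id):
            return RES_RETRY, 'Failed in locking cluster'
        try:
            action.policy_check(action.cluster_id, 'BEFORE')
            if action.data['status'] == 'CHECK_ERROR':
                return RES_ERROR, 'Policy check: ' + action.data['reason']
            try:
                if not lock.node_acquire(action.node_id, action.id):
                    return RES_RETRY, 'Failed in locking node'
                res, reason = action._execute()
                if res == RES_OK:
                    action.policy_check(action.cluster_id, 'AFTER')
                    if action.data['status'] == 'CHECK_ERROR':
                        res = RES_ERROR
                        reason = 'Policy check: ' + action.data['reason']
                return res, reason
            finally:
                lock.node_release(action.node_id, action.id)
        finally:
            lock.cluster_release(action.cluster_id, action.id)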
diff --git a/senlin/tests/unit/engine/actions/test_operation.py b/senlin/tests/unit/engine/actions/test_operation.py
deleted file mode 100644
index 93505c83b..000000000
--- a/senlin/tests/unit/engine/actions/test_operation.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from senlin.common import consts
-from senlin.engine.actions import base as ab
-from senlin.engine.actions import cluster_action as ca
-from senlin.engine import cluster as cm
-from senlin.engine import dispatcher
-from senlin.objects import action as ao
-from senlin.objects import dependency as dobj
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-@mock.patch.object(cm.Cluster, 'load')
-class ClusterOperationTest(base.SenlinTestCase):
-
-    def setUp(self):
-        super(ClusterOperationTest, self).setUp()
-        self.ctx = utils.dummy_context()
-
-    @mock.patch.object(ao.Action, 'update')
-    @mock.patch.object(ab.Action, 'create')
-    @mock.patch.object(dobj.Dependency, 'create')
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
-    def test_do_operation(self, mock_wait, mock_start, mock_dep, mock_action,
-                          mock_update, mock_load):
-        cluster = mock.Mock(id='FAKE_ID')
-        cluster.do_operation.return_value = True
-        mock_load.return_value = cluster
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_OPERATION', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {
-            'operation': 'dance',
-            'params': {'style': 'tango'},
-            'nodes': ['NODE_ID_1', 'NODE_ID_2'],
-        }
-        mock_action.side_effect = ['NODE_OP_ID_1', 'NODE_OP_ID_2']
-        mock_wait.return_value = (action.RES_OK, 'Everything is Okay')
-
-        # do it
-        res_code, res_msg = action.do_operation()
-
-        # assertions
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual("Cluster operation 'dance' completed.", res_msg)
-
-        cluster.do_operation.assert_called_once_with(action.context,
-                                                     operation='dance')
-        mock_action.assert_has_calls([
-            mock.call(action.context, 'NODE_ID_1', 'NODE_OPERATION',
-                      name='node_dance_NODE_ID_', cause=consts.CAUSE_DERIVED,
-                      inputs={
-                          'operation': 'dance',
-                          'params': {'style': 'tango'}
-                      }),
-            mock.call(action.context, 'NODE_ID_2', 'NODE_OPERATION',
-                      name='node_dance_NODE_ID_', cause=consts.CAUSE_DERIVED,
-                      inputs={
-                          'operation': 'dance',
-                          'params': {'style': 'tango'}
-                      }),
-        ])
-        mock_dep.assert_called_once_with(
-            action.context, ['NODE_OP_ID_1', 'NODE_OP_ID_2'],
-            'CLUSTER_ACTION_ID')
-        mock_update.assert_has_calls([
-            mock.call(action.context, 'NODE_OP_ID_1', {'status': 'READY'}),
-            mock.call(action.context, 'NODE_OP_ID_2', {'status': 'READY'}),
-        ])
-        mock_start.assert_called_once_with()
-        mock_wait.assert_called_once_with()
-        cluster.eval_status.assert_called_once_with(action.context, 'dance')
-
-    @mock.patch.object(ao.Action, 'update')
-    @mock.patch.object(ab.Action, 'create')
-    @mock.patch.object(dobj.Dependency, 'create')
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
-    def test_do_operation_failed_wait(self, mock_wait, mock_start, mock_dep,
-                                      mock_action, mock_update, mock_load):
-        cluster = mock.Mock(id='FAKE_ID')
-        cluster.do_operation.return_value = True
-        mock_load.return_value = cluster
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_OPERATION', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {
-            'operation': 'dance',
-            'params': {'style': 'tango'},
-            'nodes': ['NODE_ID_1', 'NODE_ID_2'],
-        }
-        mock_action.side_effect = ['NODE_OP_ID_1', 'NODE_OP_ID_2']
-        mock_wait.return_value = (action.RES_ERROR, 'Something is wrong')
-
-        # do it
-        res_code, res_msg = action.do_operation()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual("Something is wrong", res_msg)
-
-        cluster.do_operation.assert_called_once_with(action.context,
-                                                     operation='dance')
-        mock_action.assert_has_calls([
-            mock.call(action.context, 'NODE_ID_1', 'NODE_OPERATION',
-                      name='node_dance_NODE_ID_', cause=consts.CAUSE_DERIVED,
-                      inputs={
-                          'operation': 'dance',
-                          'params': {'style': 'tango'}
-                      }),
-            mock.call(action.context, 'NODE_ID_2', 'NODE_OPERATION',
-                      name='node_dance_NODE_ID_', cause=consts.CAUSE_DERIVED,
-                      inputs={
-                          'operation': 'dance',
-                          'params': {'style': 'tango'}
-                      }),
-        ])
-        mock_dep.assert_called_once_with(
-            action.context, ['NODE_OP_ID_1', 'NODE_OP_ID_2'],
-            'CLUSTER_ACTION_ID')
-        mock_update.assert_has_calls([
-            mock.call(action.context, 'NODE_OP_ID_1', {'status': 'READY'}),
-            mock.call(action.context, 'NODE_OP_ID_2', {'status': 'READY'}),
-        ])
-        mock_start.assert_called_once_with()
-        mock_wait.assert_called_once_with()
-        cluster.eval_status.assert_called_once_with(action.context, 'dance')
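Both tests above rehearse the same fan-out pattern used throughout the cluster actions: one derived NODE_OPERATION per member node, a dependency record tying the children to the parent action, children flipped to READY, the dispatcher notified, then a wait on dependents. Approximately, with the Senlin object layer reduced to injected callables (all parameter names here are invented):

    # Sketch of the fan-out; callables stand in for the object APIs the
    # tests patch (Action.create, Dependency.create, Action.update, ...).
    def fan_out_operation(ctx, parent_id, node_ids, operation, params,
                          create_action, create_dependency, update_action,
                          start_dispatcher, wait_for_dependents):
        child_ids = [
            create_action(ctx, node_id, 'NODE_OPERATION',
                          cause='DERIVED',
                          inputs={'operation': operation, 'params': params})
            for node_id in node_ids
        ]
        # Children must complete before the parent action may finish.
        create_dependency(ctx, child_ids, parent_id)
        for child_id in child_ids:
            update_action(ctx, child_id, {'status': 'READY'})
        start_dispatcher()
        return wait_for_dependents()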
diff --git a/senlin/tests/unit/engine/actions/test_recover.py b/senlin/tests/unit/engine/actions/test_recover.py
deleted file mode 100644
index 7c23cda41..000000000
--- a/senlin/tests/unit/engine/actions/test_recover.py
+++ /dev/null
@@ -1,361 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from senlin.common import consts
-from senlin.common import scaleutils as su
-from senlin.engine.actions import base as ab
-from senlin.engine.actions import cluster_action as ca
-from senlin.engine import cluster as cm
-from senlin.engine import dispatcher
-from senlin.engine import node as nm
-from senlin.objects import action as ao
-from senlin.objects import dependency as dobj
-from senlin.objects import node as no
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-@mock.patch.object(cm.Cluster, 'load')
-class ClusterRecoverTest(base.SenlinTestCase):
-
-    def setUp(self):
-        super(ClusterRecoverTest, self).setUp()
-        self.ctx = utils.dummy_context()
-
-    @mock.patch.object(ao.Action, 'update')
-    @mock.patch.object(ab.Action, 'create')
-    @mock.patch.object(dobj.Dependency, 'create')
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
-    def test_do_recover(self, mock_wait, mock_start, mock_dep,
-                        mock_action, mock_update, mock_load):
-        node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ACTIVE')
-        node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_ID', status='ERROR')
-
-        cluster = mock.Mock(id='FAKE_ID', RECOVERING='RECOVERING',
-                            desired_capacity=2)
-        cluster.do_recover.return_value = True
-        mock_load.return_value = cluster
-        cluster.nodes = [node1, node2]
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_RECOVER', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.data = {}
-
-        mock_action.return_value = 'NODE_RECOVER_ID'
-        mock_wait.return_value = (action.RES_OK, 'Everything is Okay')
-
-        # do it
-        res_code, res_msg = action.do_recover()
-
-        # assertions
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Cluster recovery succeeded.', res_msg)
-
-        cluster.do_recover.assert_called_once_with(action.context)
-        mock_action.assert_called_once_with(
-            action.context, 'NODE_2', 'NODE_RECOVER',
-            name='node_recover_NODE_2',
-            cause=consts.CAUSE_DERIVED,
-            inputs={'operation': None, 'operation_params': None}
-        )
-        mock_dep.assert_called_once_with(action.context, ['NODE_RECOVER_ID'],
-                                         'CLUSTER_ACTION_ID')
-        mock_update.assert_called_once_with(action.context, 'NODE_RECOVER_ID',
-                                            {'status': 'READY'})
-        mock_start.assert_called_once_with()
-        mock_wait.assert_called_once_with()
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.CLUSTER_RECOVER)
-
-    @mock.patch.object(ao.Action, 'update')
-    @mock.patch.object(ab.Action, 'create')
-    @mock.patch.object(dobj.Dependency, 'create')
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
-    @mock.patch.object(ca.ClusterAction, '_check_capacity')
-    def test_do_recover_with_input(self, mock_check, mock_wait, mock_start,
-                                   mock_dep, mock_action, mock_update,
-                                   mock_load):
-        node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ERROR')
-        cluster = mock.Mock(id='FAKE_ID', RECOVERING='RECOVERING',
-                            desired_capacity=2)
-        cluster.nodes = [node1]
-        cluster.do_recover.return_value = True
-        mock_load.return_value = cluster
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_RECOVER', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {
-            'operation': consts.RECOVER_REBOOT,
-            'check': False,
-            'check_capacity': True
-        }
-
-        mock_action.return_value = 'NODE_RECOVER_ID'
-        mock_wait.return_value = (action.RES_OK, 'Everything is Okay')
-
-        # do it
-        res_code, res_msg = action.do_recover()
-
-        # assertions
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Cluster recovery succeeded.', res_msg)
-
-        cluster.do_recover.assert_called_once_with(action.context)
-        mock_action.assert_called_once_with(
-            action.context, 'NODE_1', 'NODE_RECOVER',
-            name='node_recover_NODE_1',
-            cause=consts.CAUSE_DERIVED,
-            inputs={
-                'operation': consts.RECOVER_REBOOT,
-                'operation_params': None
-            }
-        )
-        mock_dep.assert_called_once_with(action.context, ['NODE_RECOVER_ID'],
-                                         'CLUSTER_ACTION_ID')
-        mock_update.assert_called_once_with(action.context, 'NODE_RECOVER_ID',
-                                            {'status': 'READY'})
-        mock_start.assert_called_once_with()
-        mock_wait.assert_called_once_with()
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.CLUSTER_RECOVER)
-        mock_check.assert_called_once_with()
-
-    def test_do_recover_all_nodes_active(self, mock_load):
-        cluster = mock.Mock(id='FAKE_ID', desired_capacity=2)
-        cluster.do_recover.return_value = True
-        mock_load.return_value = cluster
-
-        node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ACTIVE')
-        node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_ID', status='ACTIVE')
-        cluster.nodes = [node1, node2]
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_RECOVER', self.ctx)
-
-        # do it
-        res_code, res_msg = action.do_recover()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Cluster recovery succeeded.', res_msg)
-        cluster.do_recover.assert_called_once_with(self.ctx)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.CLUSTER_RECOVER)
-
-    @mock.patch.object(ao.Action, 'update')
-    @mock.patch.object(ab.Action, 'create')
-    @mock.patch.object(dobj.Dependency, 'create')
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
-    @mock.patch.object(ca.ClusterAction, '_check_capacity')
-    def test_do_recover_failed_waiting(self, mock_check, mock_wait,
-                                       mock_start, mock_dep, mock_action,
-                                       mock_update, mock_load):
-        node = mock.Mock(id='NODE_1', cluster_id='CID', status='ERROR')
-        cluster = mock.Mock(id='CID', desired_capacity=2)
-        cluster.do_recover.return_value = True
-        cluster.nodes = [node]
-        mock_load.return_value = cluster
-        mock_action.return_value = 'NODE_ACTION_ID'
-
-        action = ca.ClusterAction('FAKE_CLUSTER', 'CLUSTER_RECOVER', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {
-            'operation': consts.RECOVER_RECREATE,
-            'check': False,
-            'check_capacity': False
-        }
-
-        mock_wait.return_value = (action.RES_TIMEOUT, 'Timeout!')
-
-        res_code, res_msg = action.do_recover()
-
-        self.assertEqual(action.RES_TIMEOUT, res_code)
-        self.assertEqual('Timeout!', res_msg)
-
-        mock_load.assert_called_once_with(self.ctx, 'FAKE_CLUSTER')
-        cluster.do_recover.assert_called_once_with(action.context)
-        mock_action.assert_called_once_with(
-            action.context, 'NODE_1', 'NODE_RECOVER',
-            name='node_recover_NODE_1',
-            cause=consts.CAUSE_DERIVED,
-            inputs={
-                'operation': consts.RECOVER_RECREATE,
-                'operation_params': None
-            }
-        )
-        mock_dep.assert_called_once_with(action.context, ['NODE_ACTION_ID'],
-                                         'CLUSTER_ACTION_ID')
-        mock_update.assert_called_once_with(action.context, 'NODE_ACTION_ID',
-                                            {'status': 'READY'})
-        mock_start.assert_called_once_with()
-        mock_wait.assert_called_once_with()
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.CLUSTER_RECOVER)
-        self.assertFalse(mock_check.called)
-
-    @mock.patch.object(ca.ClusterAction, '_check_capacity')
-    @mock.patch.object(nm.Node, 'load')
-    def test_do_recover_with_check_active(self, mock_node, mock_desired,
-                                          mock_load):
-        cluster = mock.Mock(id='FAKE_ID', desired_capacity=2)
-        cluster.do_recover.return_value = True
-        mock_load.return_value = cluster
-
-        node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ACTIVE')
-        node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_ID', status='ERROR')
-        cluster.nodes = [node1, node2]
-
-        eng_node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID',
-                              status='ACTIVE')
-        eng_node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_ID',
-                              status='ERROR')
-        mock_node.side_effect = [eng_node1, eng_node2]
-
-        def set_status(*args, **kwargs):
-            eng_node2.status = 'ACTIVE'
-
-        mock_check = self.patchobject(nm.Node, 'do_check')
-        mock_check.side_effect = set_status
-        eng_node2.do_check = mock_check
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_RECOVER', self.ctx)
-        action.inputs = {'check': True}
-
-        # do it
-        res_code, res_msg = action.do_recover()
-
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Cluster recovery succeeded.', res_msg)
-        node_calls = [
-            mock.call(self.ctx, node_id='NODE_1'),
-            mock.call(self.ctx, node_id='NODE_2')
-        ]
-        mock_node.assert_has_calls(node_calls)
-        eng_node1.do_check.assert_called_once_with(self.ctx)
-        eng_node2.do_check.assert_called_once_with(self.ctx)
-        cluster.do_recover.assert_called_once_with(self.ctx)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.CLUSTER_RECOVER)
-        self.assertFalse(mock_desired.called)
-
-    @mock.patch.object(ao.Action, 'update')
-    @mock.patch.object(ab.Action, 'create')
-    @mock.patch.object(dobj.Dependency, 'create')
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
-    @mock.patch.object(ca.ClusterAction, '_check_capacity')
-    @mock.patch.object(nm.Node, 'load')
-    def test_do_recover_with_check_error(self, mock_node, mock_desired,
-                                         mock_wait, mock_start, mock_dep,
-                                         mock_action, mock_update, mock_load):
-        node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ACTIVE')
-        node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_ID', status='ACTIVE')
-
-        cluster = mock.Mock(id='FAKE_ID', RECOVERING='RECOVERING',
                            desired_capacity=2)
-        cluster.do_recover.return_value = True
-        mock_load.return_value = cluster
-        cluster.nodes = [node1, node2]
-
-        eng_node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID',
-                              status='ACTIVE')
-        eng_node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_ID',
-                              status='ACTIVE')
-        mock_node.side_effect = [eng_node1, eng_node2]
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_RECOVER', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {'check': True,
-                         'check_capacity': True}
-
-        mock_action.return_value = 'NODE_RECOVER_ID'
-        mock_wait.return_value = (action.RES_OK, 'Everything is Okay')
-
-        def set_status(*args, **kwargs):
-            eng_node2.status = 'ERROR'
-
-        mock_check = self.patchobject(nm.Node, 'do_check')
-        mock_check.side_effect = set_status
-        eng_node2.do_check = mock_check
-
-        # do it
-        res_code, res_msg = action.do_recover()
-
-        # assertions
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Cluster recovery succeeded.', res_msg)
-
-        cluster.do_recover.assert_called_once_with(action.context)
-        mock_action.assert_called_once_with(
-            action.context, 'NODE_2', 'NODE_RECOVER',
-            name='node_recover_NODE_2',
-            cause=consts.CAUSE_DERIVED,
-            inputs={'operation': None,
-                    'operation_params': None}
-        )
-        node_calls = [
-            mock.call(self.ctx, node_id='NODE_1'),
-            mock.call(self.ctx, node_id='NODE_2')
-        ]
-        mock_node.assert_has_calls(node_calls)
-        eng_node1.do_check.assert_called_once_with(self.ctx)
-        eng_node2.do_check.assert_called_once_with(self.ctx)
-        mock_dep.assert_called_once_with(action.context, ['NODE_RECOVER_ID'],
-                                         'CLUSTER_ACTION_ID')
-        mock_update.assert_called_once_with(action.context, 'NODE_RECOVER_ID',
-                                            {'status': 'READY'})
-        mock_start.assert_called_once_with()
-        mock_wait.assert_called_once_with()
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.CLUSTER_RECOVER)
-        mock_desired.assert_called_once_with()
-
-    @mock.patch.object(ca.ClusterAction, '_create_nodes')
-    def test_check_capacity_create(self, mock_create, mock_load):
-        node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ACTIVE')
-
-        cluster = mock.Mock(id='FAKE_ID', RECOVERING='RECOVERING',
-                            desired_capacity=2)
-        mock_load.return_value = cluster
-        cluster.nodes = [node1]
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_RECOVER', self.ctx)
-
-        action._check_capacity()
-
-        mock_create.assert_called_once_with(1)
-
-    @mock.patch.object(su, 'nodes_by_random')
-    @mock.patch.object(no.Node, 'get_all_by_cluster')
-    @mock.patch.object(ca.ClusterAction, '_delete_nodes')
-    def test_check_capacity_delete(self, mock_delete, mock_get,
-                                   mock_su, mock_load):
-        node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ACTIVE')
-        node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_ID', status='ERROR')
-
-        cluster = mock.Mock(id='FAKE_ID', RECOVERING='RECOVERING',
-                            desired_capacity=1)
-        mock_load.return_value = cluster
-        cluster.nodes = [node1, node2]
-        mock_get.return_value = [node1, node2]
-        mock_su.return_value = [node2.id]
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_RECOVER', self.ctx)
-
-        action._check_capacity()
-
-        mock_get.assert_called_once_with(action.context, cluster.id)
-        mock_su.assert_called_once_with([node1, node2], 1)
-        mock_delete.assert_called_once_with(['NODE_2'])
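The two _check_capacity tests reduce to a simple invariant: after recovery the cluster is steered back to desired_capacity, creating the shortfall or deleting a randomly chosen surplus. As a sketch under that reading, with random.sample standing in for scaleutils.nodes_by_random:

    import random

    def check_capacity(nodes, desired, create_nodes, delete_nodes):
        current = len(nodes)
        if current < desired:
            create_nodes(desired - current)      # grow back to desired
        elif current > desired:
            # Pick surplus victims; the real code delegates this choice
            # to nodes_by_random.
            victims = random.sample(nodes, current - desired)
            delete_nodes([n.id for n in victims])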
diff --git a/senlin/tests/unit/engine/actions/test_replace_nodes.py b/senlin/tests/unit/engine/actions/test_replace_nodes.py
deleted file mode 100644
index 61e6d8cfd..000000000
--- a/senlin/tests/unit/engine/actions/test_replace_nodes.py
+++ /dev/null
@@ -1,296 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from senlin.common import consts
-from senlin.engine.actions import base as ab
-from senlin.engine.actions import cluster_action as ca
-from senlin.engine import cluster as cm
-from senlin.engine import dispatcher
-from senlin.objects import action as ao
-from senlin.objects import dependency as dobj
-from senlin.objects import node as no
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-@mock.patch.object(cm.Cluster, 'load')
-class ClusterReplaceNodesTest(base.SenlinTestCase):
-
-    def setUp(self):
-        super(ClusterReplaceNodesTest, self).setUp()
-        self.ctx = utils.dummy_context()
-
-    @mock.patch.object(ao.Action, 'update')
-    @mock.patch.object(ab.Action, 'create')
-    @mock.patch.object(no.Node, 'get')
-    @mock.patch.object(dobj.Dependency, 'create')
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
-    def test_do_replace_nodes(self, mock_wait, mock_start, mock_dep,
-                              mock_get_node, mock_action, mock_update,
-                              mock_load):
-        cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=10)
-        mock_load.return_value = cluster
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {'candidates': {'O_NODE_1': 'R_NODE_1'}, 'foo': 'bar'}
-        action.outputs = {}
-
-        origin_node = mock.Mock(id='O_NODE_1', cluster_id='CLUSTER_ID',
-                                ACTIVE='ACTIVE', status='ACTIVE')
-        replace_node = mock.Mock(id='R_NODE_1', cluster_id='',
-                                 ACTIVE='ACTIVE', status='ACTIVE')
-        mock_get_node.side_effect = [origin_node, replace_node]
-        mock_action.side_effect = ['NODE_LEAVE_1', 'NODE_JOIN_1']
-        mock_wait.return_value = (action.RES_OK, 'Free to fly!')
-
-        # do the action
-        res_code, res_msg = action.do_replace_nodes()
-
-        # assertions
-        self.assertEqual(action.RES_OK, res_code)
-        self.assertEqual('Completed replacing nodes.', res_msg)
-
-        mock_get_node.assert_has_calls([
-            mock.call(action.context, 'O_NODE_1'),
-            mock.call(action.context, 'R_NODE_1')])
-        mock_load.assert_called_once_with(
-            action.context,
-            'CLUSTER_ID')
-        mock_action.assert_has_calls([
-            mock.call(action.context, 'O_NODE_1', 'NODE_LEAVE',
-                      name='node_leave_O_NODE_1',
-                      cluster_id='CLUSTER_ID',
-                      cause='Derived Action'),
-            mock.call(action.context, 'R_NODE_1', 'NODE_JOIN',
-                      name='node_join_R_NODE_1',
-                      cluster_id='CLUSTER_ID',
-                      cause='Derived Action',
-                      inputs={'cluster_id': 'CLUSTER_ID'})])
-
-        mock_dep.assert_has_calls([
-            mock.call(action.context,
-                      ['NODE_JOIN_1'],
-                      'CLUSTER_ACTION_ID'),
-            mock.call(action.context,
-                      ['NODE_JOIN_1'],
-                      'NODE_LEAVE_1')])
-
-        mock_update.assert_has_calls([
-            mock.call(action.context,
-                      'NODE_JOIN_1',
-                      {'status': 'READY'}),
-            mock.call(action.context,
-                      'NODE_LEAVE_1',
-                      {'status': 'READY'})])
-        mock_start.assert_called_once_with()
-
-        mock_wait.assert_called_once_with()
-
-        cluster.remove_node.assert_called_once_with(origin_node)
-        cluster.add_node.assert_called_once_with(replace_node)
-        cluster.eval_status.assert_called_once_with(
-            action.context, consts.CLUSTER_REPLACE_NODES)
-
-    @mock.patch.object(no.Node, 'get')
-    def test_do_replace_nodes_original_not_found(self, mock_get_node,
-                                                 mock_load):
-        action = ca.ClusterAction('ID', 'CLUSTER_ACTION', self.ctx)
-        action.inputs = {'candidates': {'ORIGIN_NODE': 'REPLACE_NODE'}}
-        origin_node = None
-        replace_node = mock.Mock(id='REPLACE_NODE', cluster_id='',
-                                 ACTIVE='ACTIVE', status='ACTIVE')
-        mock_get_node.side_effect = [origin_node, replace_node]
-        # do the action
-        res_code, res_msg = action.do_replace_nodes()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Original node ORIGIN_NODE not found.',
-                         res_msg)
-
-    def test_do_replace_nodes_empty_candidates(self, mock_load):
-        action = ca.ClusterAction('ID', 'CLUSTER_ACTION', self.ctx)
-        action.inputs = {'candidates': {}}
-        res_code, res_msg = action.do_replace_nodes()
-
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual(
-            'Candidates must be a non-empty dict. Instead got {}',
-            res_msg)
-
-    @mock.patch.object(no.Node, 'get')
-    def test_do_replace_nodes_replacement_not_found(self, mock_get_node,
-                                                    mock_load):
-        action = ca.ClusterAction('ID', 'CLUSTER_ACTION', self.ctx)
-        action.inputs = {'candidates': {'ORIGIN_NODE': 'REPLACE_NODE'}}
-        origin_node = mock.Mock(id='ORIGIN_NODE', cluster_id='CLUSTER_ID',
-                                ACTIVE='ACTIVE', status='ACTIVE')
-        replace_node = None
-        mock_get_node.side_effect = [origin_node, replace_node]
-        # do the action
-        res_code, res_msg = action.do_replace_nodes()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Replacement node REPLACE_NODE not found.',
-                         res_msg)
-
-    @mock.patch.object(no.Node, 'get')
-    def test_do_replace_nodes_not_a_member(self, mock_get_node,
-                                           mock_load):
-        cluster = mock.Mock(id='FAKE_CLUSTER')
-        mock_load.return_value = cluster
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
-        action.inputs = {'candidates': {'ORIGIN_NODE': 'REPLACE_NODE'}}
-
-        origin_node = mock.Mock(id='ORIGIN_NODE', cluster_id='')
-        mock_get_node.return_value = origin_node
-        # do action
-        res_code, res_msg = action.do_replace_nodes()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node ORIGIN_NODE is not a member of the '
-                         'cluster FAKE_CLUSTER.', res_msg)
-
-    @mock.patch.object(no.Node, 'get')
-    def test_do_replace_nodes_node_already_member(self, mock_get_node,
-                                                  mock_load):
-        cluster = mock.Mock(id='FAKE_CLUSTER')
-        mock_load.return_value = cluster
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
-        action.inputs = {'candidates': {'ORIGIN_NODE': 'REPLACE_NODE'}}
-
-        replace_node = mock.Mock(id='REPLACE_NODE',
-                                 cluster_id='FAKE_CLUSTER')
-        mock_get_node.return_value = replace_node
-
-        # do it
-        res_code, res_msg = action.do_replace_nodes()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node REPLACE_NODE is already owned by cluster '
-                         'FAKE_CLUSTER.', res_msg)
-
-    @mock.patch.object(no.Node, 'get')
-    def test_do_replace_nodes_in_other_cluster(self, mock_get_node,
-                                               mock_load):
-        cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=10)
-        mock_load.return_value = cluster
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {'candidates': {'ORIGIN_NODE': 'REPLACE_NODE'}}
-        action.outputs = {}
-
-        origin_node = mock.Mock(id='ORIGIN_NODE', cluster_id='CLUSTER_ID',
-                                ACTIVE='ACTIVE', status='ACTIVE')
-        replace_node = mock.Mock(id='REPLACE_NODE', cluster_id='FAKE_CLUSTER',
-                                 ACTIVE='ACTIVE', status='ACTIVE')
-        mock_get_node.side_effect = [origin_node, replace_node]
-
-        # do it
-        res_code, res_msg = action.do_replace_nodes()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual('Node REPLACE_NODE is already owned by cluster '
-                         'FAKE_CLUSTER.', res_msg)
-
-    @mock.patch.object(no.Node, 'get')
-    def test_do_replace_nodes_node_not_active(self, mock_get_node, mock_load):
-        cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=10)
-        mock_load.return_value = cluster
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {'candidates': {'ORIGIN_NODE': 'REPLACE_NODE'}}
-        action.outputs = {}
-
-        origin_node = mock.Mock(id='ORIGIN_NODE', cluster_id='CLUSTER_ID',
-                                ACTIVE='ACTIVE', status='ACTIVE')
-        replace_node = mock.Mock(id='REPLACE_NODE', cluster_id='',
-                                 ACTIVE='ACTIVE', status='ERROR')
-        mock_get_node.side_effect = [origin_node, replace_node]
-
-        # do it
-        res_code, res_msg = action.do_replace_nodes()
-
-        # assertions
-        self.assertEqual(action.RES_ERROR, res_code)
-        self.assertEqual("Node REPLACE_NODE is not in ACTIVE status.", res_msg)
-
-    @mock.patch.object(ao.Action, 'update')
-    @mock.patch.object(ab.Action, 'create')
-    @mock.patch.object(no.Node, 'get')
-    @mock.patch.object(dobj.Dependency, 'create')
-    @mock.patch.object(dispatcher, 'start_action')
-    @mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
-    def test_do_replace_failed_waiting(self, mock_wait, mock_start, mock_dep,
-                                       mock_get_node, mock_action,
-                                       mock_update, mock_load):
-        cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=10)
-        mock_load.return_value = cluster
-
-        action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx)
-        action.id = 'CLUSTER_ACTION_ID'
-        action.inputs = {'candidates': {'O_NODE_1': 'R_NODE_1'}}
-        action.outputs = {}
-
-        origin_node = mock.Mock(id='O_NODE_1', cluster_id='CLUSTER_ID',
-                                ACTIVE='ACTIVE', status='ACTIVE')
-        replace_node = mock.Mock(id='R_NODE_1', cluster_id='',
-                                 ACTIVE='ACTIVE', status='ACTIVE')
-        mock_get_node.side_effect = [origin_node, replace_node]
-        mock_action.side_effect = ['NODE_LEAVE_1', 'NODE_JOIN_1']
-        mock_wait.return_value = (action.RES_TIMEOUT, 'Timeout!')
-
-        # do the action
-        res_code, res_msg = action.do_replace_nodes()
-
-        # assertions
-        mock_action.assert_has_calls([
-            mock.call(action.context, 'O_NODE_1', 'NODE_LEAVE',
-                      name='node_leave_O_NODE_1',
-                      cluster_id='CLUSTER_ID',
-                      cause='Derived Action'),
-            mock.call(action.context, 'R_NODE_1', 'NODE_JOIN',
-                      name='node_join_R_NODE_1',
-                      cluster_id='CLUSTER_ID',
-                      cause='Derived Action',
-                      inputs={'cluster_id': 'CLUSTER_ID'})])
-
-        mock_dep.assert_has_calls([
-            mock.call(action.context,
-                      ['NODE_JOIN_1'],
-                      'CLUSTER_ACTION_ID'),
-            mock.call(action.context,
-                      ['NODE_JOIN_1'],
-                      'NODE_LEAVE_1')])
-
-        mock_update.assert_has_calls([
-            mock.call(action.context,
-                      'NODE_JOIN_1',
-                      {'status': 'READY'}),
-            mock.call(action.context,
-                      'NODE_LEAVE_1',
-                      {'status': 'READY'})])
-
-        self.assertEqual(action.RES_TIMEOUT, res_code)
-        self.assertEqual('Timeout!', res_msg)
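The negative tests above walk a fixed validation ladder for each origin/replacement pair. The message strings below follow the assertions; the helper itself is invented for illustration and is not the retired implementation:

    def validate_candidates(candidates, cluster_id, get_node):
        # Returns an error message, or None if every pair is acceptable.
        if not candidates:
            return ('Candidates must be a non-empty dict. '
                    'Instead got %s' % candidates)
        for origin_id, replacement_id in candidates.items():
            origin = get_node(origin_id)
            if origin is None:
                return 'Original node %s not found.' % origin_id
            if origin.cluster_id != cluster_id:
                return ('Node %s is not a member of the cluster %s.'
                        % (origin_id, cluster_id))
            replacement = get_node(replacement_id)
            if replacement is None:
                return 'Replacement node %s not found.' % replacement_id
            if replacement.cluster_id:
                return ('Node %s is already owned by cluster %s.'
                        % (replacement_id, replacement.cluster_id))
            if replacement.status != 'ACTIVE':
                return 'Node %s is not in ACTIVE status.' % replacement_id
        return None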
- -from unittest import mock - -from senlin.common import consts -from senlin.common import scaleutils -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.objects import node as no -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterResizeTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterResizeTest, self).setUp() - self.ctx = utils.dummy_context() - - def test_update_cluster_size(self, mock_load): - cluster = mock.Mock(id='CID', desired_capacity=10, nodes=[]) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_RESIZE', self.ctx, - inputs={'min_size': 1, 'max_size': 20}) - - action._update_cluster_size(15) - - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, 'Cluster resize started.', - desired_capacity=15, min_size=1, max_size=20) - - def test_update_cluster_size_minimum(self, mock_load): - cluster = mock.Mock(id='CID', desired_capacity=10, nodes=[]) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_RESIZE', self.ctx, - inputs={}) - - action._update_cluster_size(15) - - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, 'Cluster resize started.', - desired_capacity=15) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ca.ClusterAction, '_update_cluster_size') - @mock.patch.object(scaleutils, 'nodes_by_random') - @mock.patch.object(ca.ClusterAction, '_sleep') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - def test_do_resize_shrink(self, mock_delete, mock_sleep, mock_select, - mock_size, mock_count, mock_load): - cluster = mock.Mock(id='CID', nodes=[], RESIZING='RESIZING') - for n in range(10): - node = mock.Mock(id='NODE-ID-%s' % (n + 1)) - cluster.nodes.append(node) - mock_load.return_value = cluster - mock_count.return_value = 10 - action = ca.ClusterAction( - cluster.id, 'CLUSTER_RESIZE', self.ctx, - data={ - 'deletion': { - 'count': 2, - 'grace_period': 2, - 'destroy_after_deletion': True - } - } - ) - mock_delete.return_value = (action.RES_OK, 'All dependents completed.') - - # do it - res_code, res_msg = action.do_resize() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster resize succeeded.', res_msg) - - mock_select.assert_called_once_with(cluster.nodes, 2) - mock_size.assert_called_once_with(8) - mock_sleep.assert_called_once_with(2) - mock_delete.assert_called_once_with(mock_select.return_value) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_RESIZE) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ca.ClusterAction, '_update_cluster_size') - @mock.patch.object(scaleutils, 'nodes_by_random') - @mock.patch.object(ca.ClusterAction, '_sleep') - @mock.patch.object(scaleutils, 'parse_resize_params') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - def test_do_resize_shrink_with_parsing(self, mock_delete, mock_parse, - mock_sleep, mock_select, mock_size, - mock_count, mock_load): - - def fake_parse(*args, **kwargs): - # side effect - action.data = {'deletion': {'count': 1}} - return action.RES_OK, '' - - cluster = mock.Mock(id='CID', nodes=[], RESIZING='RESIZING') - for n in range(10): - node = mock.Mock(id='NODE-ID-%s' % (n + 1)) - cluster.nodes.append(node) - mock_count.return_value = 10 - mock_load.return_value = cluster - mock_parse.side_effect = fake_parse - 
action = ca.ClusterAction(cluster.id, 'CLUSTER_RESIZE', self.ctx, - inputs={'blah': 'blah'}, data={}) - mock_delete.return_value = (action.RES_OK, 'All dependents completed.') - - # deletion policy is attached to the action - res_code, res_msg = action.do_resize() - - self.assertEqual({'deletion': {'count': 1}}, action.data) - mock_parse.assert_called_once_with(action, cluster, 10) - mock_select.assert_called_once_with(cluster.nodes, 1) - mock_size.assert_called_once_with(9) - mock_sleep.assert_called_once_with(0) - mock_delete.assert_called_once_with(mock_select.return_value) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_RESIZE) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ca.ClusterAction, '_update_cluster_size') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - def test_do_resize_shrink_failed_delete(self, mock_delete, mock_size, - mock_count, mock_load): - cluster = mock.Mock(id='CLID', nodes=[], RESIZING='RESIZING') - mock_count.return_value = 3 - mock_load.return_value = cluster - action = ca.ClusterAction( - cluster.id, 'CLUSTER_RESIZE', self.ctx, - data={ - 'deletion': { - 'count': 2, - 'grace_period': 2, - 'candidates': ['NODE1', 'NODE2'] - } - } - ) - mock_delete.return_value = (action.RES_ERROR, 'Bad things happened.') - - # do it - res_code, res_msg = action.do_resize() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Bad things happened.', res_msg) - - mock_size.assert_called_once_with(1) - mock_delete.assert_called_once_with(['NODE1', 'NODE2']) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_RESIZE) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ca.ClusterAction, '_update_cluster_size') - @mock.patch.object(ca.ClusterAction, '_create_nodes') - def test_do_resize_grow(self, mock_create, mock_size, mock_count, - mock_load): - cluster = mock.Mock(id='ID', nodes=[], RESIZING='RESIZING') - mock_load.return_value = cluster - mock_count.return_value = 10 - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={'creation': {'count': 2}}) - - mock_create.return_value = (action.RES_OK, 'All dependents completed.') - - # do it - res_code, res_msg = action.do_resize() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster resize succeeded.', res_msg) - - mock_size.assert_called_once_with(12) - mock_create.assert_called_once_with(2) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_RESIZE) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ca.ClusterAction, '_update_cluster_size') - @mock.patch.object(scaleutils, 'parse_resize_params') - @mock.patch.object(ca.ClusterAction, '_create_nodes') - def test_do_resize_grow_with_parsing(self, mock_create, mock_parse, - mock_size, mock_count, mock_load): - def fake_parse(*args, **kwargs): - action.data = {'creation': {'count': 3}} - return action.RES_OK, '' - - cluster = mock.Mock(id='ID', nodes=[], RESIZING='RESIZING') - mock_load.return_value = cluster - mock_count.return_value = 10 - mock_parse.side_effect = fake_parse - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'blah': 'blah'}) - mock_create.return_value = (action.RES_OK, 'All dependents completed.') - - # do it - res_code, res_msg = action.do_resize() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster resize succeeded.', res_msg) - - 
mock_parse.assert_called_once_with(action, cluster, 10) - mock_size.assert_called_once_with(13) - mock_create.assert_called_once_with(3) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_RESIZE) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(ca.ClusterAction, '_update_cluster_size') - @mock.patch.object(ca.ClusterAction, '_create_nodes') - def test_do_resize_grow_failed_create(self, mock_create, mock_size, - mock_count, mock_load): - cluster = mock.Mock(id='CLID', nodes=[], RESIZING='RESIZING') - mock_load.return_value = cluster - mock_count.return_value = 3 - action = ca.ClusterAction( - cluster.id, 'CLUSTER_RESIZE', self.ctx, - data={'creation': {'count': 2}}) - mock_create.return_value = (action.RES_ERROR, 'Bad things happened.') - - # do it - res_code, res_msg = action.do_resize() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Bad things happened.', res_msg) - - mock_size.assert_called_once_with(5) - mock_create.assert_called_once_with(2) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_RESIZE) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(scaleutils, 'parse_resize_params') - def test_do_resize_failed_parsing(self, mock_parse, mock_count, mock_load): - cluster = mock.Mock(RESIZING='RESIZING', nodes=[]) - mock_load.return_value = cluster - mock_count.return_value = 8 - action = ca.ClusterAction('ID', 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'blah': 'blah'}) - mock_parse.return_value = (action.RES_ERROR, 'Failed parsing') - - # do it - res_code, res_msg = action.do_resize() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Failed parsing', res_msg) - mock_parse.assert_called_once_with(action, cluster, 8) - self.assertEqual(0, cluster.set_status.call_count) - self.assertEqual(0, cluster.eval_status.call_count) diff --git a/senlin/tests/unit/engine/actions/test_scale_in.py b/senlin/tests/unit/engine/actions/test_scale_in.py deleted file mode 100644 index 1ebc7404d..000000000 --- a/senlin/tests/unit/engine/actions/test_scale_in.py +++ /dev/null @@ -1,207 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from senlin.common import consts -from senlin.common import scaleutils -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.objects import node as no -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterScaleInTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterScaleInTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(scaleutils, 'nodes_by_random') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_in_no_pd_no_count(self, mock_count, mock_delete, - mock_select, mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={}) - mock_count.return_value = 10 - mock_delete.return_value = (action.RES_OK, 'Life is beautiful.') - - # do it - res_code, res_msg = action.do_scale_in() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster scaling succeeded.', res_msg) - - # deleting 1 node - mock_count.assert_called_once_with(action.context, 'CID') - mock_delete.assert_called_once_with(mock.ANY) - mock_select.assert_called_once_with(cluster.nodes, 1) - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, 'Cluster scale in started.', - desired_capacity=9) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_SCALE_IN) - - @mock.patch.object(ca.ClusterAction, '_sleep') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_in_with_pd_no_input(self, mock_count, mock_delete, - mock_sleep, mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.data = { - 'deletion': { - 'count': 2, - 'grace_period': 2, - 'candidates': ['NODE_ID_3', 'NODE_ID_4'], - } - } - action.inputs = {} - mock_count.return_value = 5 - mock_delete.return_value = (action.RES_OK, 'Life is beautiful.') - - # do it - res_code, res_msg = action.do_scale_in() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster scaling succeeded.', res_msg) - - # deleting 2 nodes - mock_count.assert_called_once_with(action.context, 'CID') - mock_delete.assert_called_once_with(mock.ANY) - self.assertEqual(2, len(mock_delete.call_args[0][0])) - self.assertIn('NODE_ID_3', mock_delete.call_args[0][0]) - self.assertIn('NODE_ID_4', mock_delete.call_args[0][0]) - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, 'Cluster scale in started.', - desired_capacity=3) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_SCALE_IN) - mock_sleep.assert_called_once_with(2) - - @mock.patch.object(scaleutils, 'nodes_by_random') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_in_no_pd_with_input(self, mock_count, mock_delete, - mock_select, mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': 3}) - mock_count.return_value = 11 - mock_delete.return_value = 
(action.RES_OK, 'Life is beautiful.') - - # do it - res_code, res_msg = action.do_scale_in() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster scaling succeeded.', res_msg) - - # deleting 3 nodes - mock_count.assert_called_once_with(action.context, 'CID') - mock_delete.assert_called_once_with(mock.ANY) - mock_select.assert_called_once_with(cluster.nodes, 3) - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, 'Cluster scale in started.', - desired_capacity=8) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_SCALE_IN) - - def test_do_scale_in_negative_count(self, mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': -3}) - - # do it - res_code, res_msg = action.do_scale_in() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Invalid count (-3) for scaling in.', res_msg) - self.assertEqual(0, cluster.set_status.call_count) - self.assertEqual(0, cluster.eval_status.call_count) - - def test_do_scale_in_invalid_count(self, mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': 'tt'}) - - # do it - res_code, res_msg = action.do_scale_in() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Invalid count (tt) for scaling in.', res_msg) - self.assertEqual(0, cluster.set_status.call_count) - self.assertEqual(0, cluster.eval_status.call_count) - - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_in_failed_check(self, mock_count, mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': 3}) - mock_count.return_value = 3 - - # do it - res_code, res_msg = action.do_scale_in() - - # assertions - mock_count.assert_called_once_with(action.context, 'CID') - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("The target capacity (0) is less than the cluster's " - "min_size (1).", res_msg) - self.assertEqual(0, cluster.set_status.call_count) - self.assertEqual(0, cluster.eval_status.call_count) - - @mock.patch.object(scaleutils, 'nodes_by_random') - @mock.patch.object(ca.ClusterAction, '_delete_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_in_failed_delete_nodes(self, mock_count, mock_delete, - mock_select, mock_load): - - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': 2}) - mock_count.return_value = 5 - - # Error cases - for result in (action.RES_ERROR, action.RES_CANCEL, - action.RES_TIMEOUT, action.RES_RETRY): - mock_delete.return_value = result, 'Too cold to work!' 
- # do it - res_code, res_msg = action.do_scale_in() - # assertions - self.assertEqual(result, res_code) - self.assertEqual('Too cold to work!', res_msg) - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, - 'Cluster scale in started.', - desired_capacity=3) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_SCALE_IN) - cluster.set_status.reset_mock() - cluster.eval_status.reset_mock() - mock_delete.assert_called_once_with(mock.ANY) - mock_delete.reset_mock() diff --git a/senlin/tests/unit/engine/actions/test_scale_out.py b/senlin/tests/unit/engine/actions/test_scale_out.py deleted file mode 100644 index 83df36e79..000000000 --- a/senlin/tests/unit/engine/actions/test_scale_out.py +++ /dev/null @@ -1,205 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.objects import node as no -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterScaleOutTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterScaleOutTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(ca.ClusterAction, '_create_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_out_no_pd_no_inputs(self, mock_count, mock_create, - mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={}) - mock_count.return_value = 5 - mock_create.return_value = (action.RES_OK, 'Life is beautiful.') - - # do it - res_code, res_msg = action.do_scale_out() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster scaling succeeded.', res_msg) - - mock_count.assert_called_once_with(action.context, 'CID') - mock_create.assert_called_once_with(1) - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, 'Cluster scale out started.', - desired_capacity=6) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_SCALE_OUT) - - @mock.patch.object(ca.ClusterAction, '_create_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_out_with_pd_no_inputs(self, mock_count, mock_create, - mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={'creation': {'count': 3}}, inputs={}) - mock_count.return_value = 7 - mock_create.return_value = (action.RES_OK, 'Life is beautiful.') - - # do it - res_code, res_msg = action.do_scale_out() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster scaling succeeded.', res_msg) - - # creating 3 nodes - mock_count.assert_called_once_with(action.context, 'CID') - 
mock_create.assert_called_once_with(3) - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, 'Cluster scale out started.', - desired_capacity=10) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_SCALE_OUT) - - @mock.patch.object(ca.ClusterAction, '_create_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_out_no_pd_with_inputs(self, mock_count, mock_create, - mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': 2}) - mock_count.return_value = 8 - mock_create.return_value = (action.RES_OK, 'Life is beautiful.') - - # do it - res_code, res_msg = action.do_scale_out() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster scaling succeeded.', res_msg) - - # creating 2 nodes, as requested in the action inputs - mock_count.assert_called_once_with(action.context, 'CID') - mock_create.assert_called_once_with(2) - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, 'Cluster scale out started.', - desired_capacity=10) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_SCALE_OUT) - - def test_do_scale_out_count_negative(self, mock_load): - cluster = mock.Mock(id='CID') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': -2}) - - # do it - res_code, res_msg = action.do_scale_out() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Invalid count (-2) for scaling out.', res_msg) - self.assertEqual(0, cluster.set_status.call_count) - self.assertEqual(0, cluster.eval_status.call_count) - - def test_do_scale_out_count_invalid(self, mock_load): - cluster = mock.Mock(id='CID') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': 'tt'}) - - # do it - res_code, res_msg = action.do_scale_out() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Invalid count (tt) for scaling out.', res_msg) - self.assertEqual(0, cluster.set_status.call_count) - self.assertEqual(0, cluster.eval_status.call_count) - - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_out_failed_checking(self, mock_count, mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=4) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': 2}) - mock_count.return_value = 3 - - # do it - res_code, res_msg = action.do_scale_out() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual("The target capacity (5) is greater than the " - "cluster's max_size (4).", res_msg) - mock_count.assert_called_once_with(action.context, 'CID') - self.assertEqual(0, cluster.set_status.call_count) - self.assertEqual(0, cluster.eval_status.call_count) - - @mock.patch.object(ca.ClusterAction, '_create_nodes') - @mock.patch.object(no.Node, 'count_by_cluster') - def test_do_scale_out_failed_create_nodes(self, mock_count, mock_create, - mock_load): - cluster = mock.Mock(id='CID', min_size=1, max_size=-1) - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx, - data={}, inputs={'count': 2}) - mock_count.return_value = 4 - - # Error cases - for result in 
(action.RES_ERROR, action.RES_CANCEL, - action.RES_TIMEOUT): - mock_create.return_value = result, 'Too hot to work!' - - # do it - res_code, res_msg = action.do_scale_out() - - # assertions - self.assertEqual(result, res_code) - self.assertEqual('Too hot to work!', res_msg) - - cluster.set_status.assert_called_once_with( - action.context, consts.CS_RESIZING, - 'Cluster scale out started.', - desired_capacity=6) - cluster.set_status.reset_mock() - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_SCALE_OUT) - cluster.eval_status.reset_mock() - mock_create.assert_called_once_with(2) - mock_create.reset_mock() - - # Retry case - mock_create.return_value = action.RES_RETRY, 'Not good time!' - - # do it - res_code, res_msg = action.do_scale_out() - - # assertions - self.assertEqual(action.RES_RETRY, res_code) - self.assertEqual('Not good time!', res_msg) - self.assertEqual(1, cluster.set_status.call_count) - self.assertEqual(1, cluster.eval_status.call_count) - mock_create.assert_called_once_with(2) diff --git a/senlin/tests/unit/engine/actions/test_update.py b/senlin/tests/unit/engine/actions/test_update.py deleted file mode 100644 index b92761309..000000000 --- a/senlin/tests/unit/engine/actions/test_update.py +++ /dev/null @@ -1,333 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.engine.actions import base as ab -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.engine import dispatcher -from senlin.objects import action as ao -from senlin.objects import dependency as dobj -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterUpdateTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterUpdateTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(ca.ClusterAction, '_update_nodes') - def test_do_update_multi(self, mock_update, mock_load): - node1 = mock.Mock(id='fake id 1') - node2 = mock.Mock(id='fake id 2') - cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], - ACTIVE='ACTIVE') - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'name': 'FAKE_NAME', - 'metadata': {'foo': 'bar'}, - 'timeout': 3600, - 'new_profile_id': 'FAKE_PROFILE'} - reason = 'Cluster update completed.' 
- mock_update.return_value = (action.RES_OK, reason) - # do it - res_code, res_msg = action.do_update() - - # assertions - self.assertEqual(action.RES_OK, res_code) - self.assertEqual(reason, res_msg) - mock_update.assert_called_once_with('FAKE_PROFILE', - [node1, node2]) - - @mock.patch.object(ca.ClusterAction, '_update_nodes') - def test_do_update_set_status_failed(self, mock_update, mock_load): - node1 = mock.Mock(id='fake id 1') - node2 = mock.Mock(id='fake id 2') - cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], - ACTIVE='ACTIVE') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - - cluster.do_update.return_value = False - reason = 'Cluster update failed.' - # do it - res_code, res_msg = action.do_update() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual(reason, res_msg) - self.assertEqual(0, mock_update.call_count) - - @mock.patch.object(ca.ClusterAction, '_update_nodes') - def test_do_update_multi_failed(self, mock_update, mock_load): - node1 = mock.Mock(id='fake id 1') - node2 = mock.Mock(id='fake id 2') - cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], - ACTIVE='ACTIVE') - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'new_profile_id': 'FAKE_PROFILE'} - reason = 'Failed in updating nodes.' - mock_update.return_value = (action.RES_ERROR, reason) - # do it - res_code, res_msg = action.do_update() - - # assertions - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual(reason, res_msg) - mock_update.assert_called_once_with('FAKE_PROFILE', - [node1, node2]) - - def test_do_update_not_profile(self, mock_load): - cluster = mock.Mock(id='FAKE_ID', nodes=[], ACTIVE='ACTIVE') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {} - res_code, res_msg = action.do_update() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster update completed.', res_msg) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_UPDATE, updated_at=mock.ANY) - - def test_do_update_profile_only(self, mock_load): - cluster = mock.Mock(id='FAKE_ID', nodes=[], ACTIVE='ACTIVE') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - config = {'cluster.stop_timeout_before_update': 25} - action.inputs = {'name': 'FAKE_NAME', - 'metadata': {'foo': 'bar'}, - 'timeout': 3600, - 'new_profile_id': 'FAKE_PROFILE', - 'profile_only': True, - 'config': config} - res_code, res_msg = action.do_update() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster update completed.', res_msg) - self.assertEqual(action.entity.config, config) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_UPDATE, profile_id='FAKE_PROFILE', - updated_at=mock.ANY) - - @mock.patch.object(ca.ClusterAction, '_update_nodes') - def test_do_update_invalid_stop_timeout(self, mock_update, mock_load): - cluster = mock.Mock(id='FAKE_ID', nodes=[], ACTIVE='ACTIVE') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - config = {'cluster.stop_timeout_before_update': 'abc'} - action.inputs = {'name': 'FAKE_NAME', - 'metadata': {'foo': 'bar'}, - 'timeout': 3600, - 'new_profile_id': 'FAKE_PROFILE', - 'profile_only': True, - 'config': config} - res_code, res_msg = action.do_update() - - self.assertEqual(action.RES_ERROR, 
res_code) - mock_update.assert_not_called() - - def test_do_update_empty_cluster(self, mock_load): - cluster = mock.Mock(id='FAKE_ID', nodes=[], ACTIVE='ACTIVE') - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'new_profile_id': 'FAKE_PROFILE'} - - # do it - res_code, res_msg = action.do_update() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Cluster update completed.', res_msg) - self.assertEqual('FAKE_PROFILE', cluster.profile_id) - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_UPDATE, profile_id='FAKE_PROFILE', - updated_at=mock.ANY) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_update_nodes_no_policy(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - node1 = mock.Mock(id='node_id1') - node2 = mock.Mock(id='node_id2') - cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], - ACTIVE='ACTIVE', config={}) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'new_profile_id': 'FAKE_PROFILE'} - action.id = 'CLUSTER_ACTION_ID' - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.side_effect = ['NODE_ACTION1', 'NODE_ACTION2'] - kwargs1 = { - 'name': 'node_update_node_id1', - 'cluster_id': cluster.id, - 'cause': consts.CAUSE_DERIVED, - 'inputs': { - 'new_profile_id': 'FAKE_PROFILE', - }, - } - kwargs2 = { - 'name': 'node_update_node_id2', - 'cluster_id': cluster.id, - 'cause': consts.CAUSE_DERIVED, - 'inputs': { - 'new_profile_id': 'FAKE_PROFILE', - }, - } - - res_code, reason = action._update_nodes('FAKE_PROFILE', - [node1, node2]) - self.assertEqual(res_code, action.RES_OK) - self.assertEqual(reason, 'Cluster update completed.') - mock_action.assert_has_calls([ - mock.call(action.context, node1.id, consts.NODE_UPDATE, **kwargs1), - mock.call(action.context, node2.id, consts.NODE_UPDATE, **kwargs2), - ]) - self.assertEqual(1, mock_dep.call_count) - self.assertEqual(2, mock_update.call_count) - mock_start.assert_called_once_with() - - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_UPDATE, profile_id='FAKE_PROFILE', - updated_at=mock.ANY) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_update_nodes_with_config(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - node1 = mock.Mock(id='node_id1') - node2 = mock.Mock(id='node_id2') - cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], - ACTIVE='ACTIVE', config={'blah': 'abc'}) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'new_profile_id': 'FAKE_PROFILE'} - action.id = 'CLUSTER_ACTION_ID' - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.side_effect = ['NODE_ACTION1', 'NODE_ACTION2'] - kwargs1 = { - 'name': 'node_update_node_id1', - 'cluster_id': cluster.id, - 'cause': consts.CAUSE_DERIVED, - 'inputs': { - 'blah': 'abc', - 'new_profile_id': 'FAKE_PROFILE', - }, - } - kwargs2 = { - 'name': 
'node_update_node_id2', - 'cluster_id': cluster.id, - 'cause': consts.CAUSE_DERIVED, - 'inputs': { - 'blah': 'abc', - 'new_profile_id': 'FAKE_PROFILE', - }, - } - - res_code, reason = action._update_nodes('FAKE_PROFILE', - [node1, node2]) - self.assertEqual(res_code, action.RES_OK) - self.assertEqual(reason, 'Cluster update completed.') - mock_action.assert_has_calls([ - mock.call(action.context, node1.id, consts.NODE_UPDATE, **kwargs1), - mock.call(action.context, node2.id, consts.NODE_UPDATE, **kwargs2), - ]) - self.assertEqual(1, mock_dep.call_count) - self.assertEqual(2, mock_update.call_count) - mock_start.assert_called_once_with() - - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_UPDATE, profile_id='FAKE_PROFILE', - updated_at=mock.ANY) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_update_nodes_batch_policy(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - node1 = mock.Mock(id='node_id1') - node2 = mock.Mock(id='node_id2') - cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], - ACTIVE='ACTIVE', config={}) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'new_profile_id': 'FAKE_PROFILE'} - action.id = 'CLUSTER_ACTION_ID' - action.data = { - 'update': { - 'pause_time': 0.1, - 'min_in_service': 1, - 'plan': [{node1.id}, {node2.id}], - } - } - mock_wait.return_value = (action.RES_OK, 'All dependents completed') - mock_action.side_effect = ['NODE_ACTION1', 'NODE_ACTION2'] - - res_code, reason = action._update_nodes('FAKE_PROFILE', - [node1, node2]) - self.assertEqual(res_code, action.RES_OK) - self.assertEqual(reason, 'Cluster update completed.') - self.assertEqual(2, mock_action.call_count) - self.assertEqual(2, mock_dep.call_count) - self.assertEqual(2, mock_update.call_count) - self.assertEqual(2, mock_start.call_count) - - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_UPDATE, profile_id='FAKE_PROFILE', - updated_at=mock.ANY) - - @mock.patch.object(ao.Action, 'update') - @mock.patch.object(ab.Action, 'create') - @mock.patch.object(dobj.Dependency, 'create') - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test_update_nodes_fail_wait(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): - node1 = mock.Mock(id='node_id1') - node2 = mock.Mock(id='node_id2') - cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], - ACTIVE='ACTIVE', config={}) - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'new_profile_id': 'FAKE_PROFILE'} - action.id = 'CLUSTER_ACTION_ID' - mock_wait.return_value = (action.RES_ERROR, 'Oops!') - mock_action.side_effect = ['NODE_ACTION1', 'NODE_ACTION2'] - - res_code, reason = action._update_nodes('FAKE_PROFILE', - [node1, node2]) - self.assertEqual(res_code, action.RES_ERROR) - self.assertEqual(reason, 'Failed in updating nodes.') - self.assertEqual(2, mock_action.call_count) - self.assertEqual(1, mock_dep.call_count) - self.assertEqual(2, mock_update.call_count) - mock_start.assert_called_once_with() - cluster.eval_status.assert_called_once_with( - action.context, consts.CLUSTER_UPDATE) diff --git 
a/senlin/tests/unit/engine/actions/test_update_policy.py b/senlin/tests/unit/engine/actions/test_update_policy.py deleted file mode 100644 index 09df3eb2c..000000000 --- a/senlin/tests/unit/engine/actions/test_update_policy.py +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -@mock.patch.object(cm.Cluster, 'load') -class ClusterUpdatePolicyTest(base.SenlinTestCase): - - def setUp(self): - super(ClusterUpdatePolicyTest, self).setUp() - self.ctx = utils.dummy_context() - - def test_do_update_policy(self, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - cluster.update_policy.return_value = True, 'Success.' - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = { - 'policy_id': 'FAKE_POLICY', - 'foo': 'bar', - } - - # do it - res_code, res_msg = action.do_update_policy() - - self.assertEqual(action.RES_OK, res_code) - self.assertEqual('Success.', res_msg) - cluster.update_policy.assert_called_once_with( - action.context, 'FAKE_POLICY', foo='bar') - - def test_do_update_policy_failed_update(self, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - cluster.update_policy.return_value = False, 'Something is wrong.' - mock_load.return_value = cluster - - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = { - 'policy_id': 'FAKE_POLICY', - 'foo': 'bar', - } - - # do it - res_code, res_msg = action.do_update_policy() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Something is wrong.', res_msg) - cluster.update_policy.assert_called_once_with( - action.context, 'FAKE_POLICY', foo='bar') - - def test_do_update_policy_missing_policy(self, mock_load): - cluster = mock.Mock() - cluster.id = 'FAKE_CLUSTER' - mock_load.return_value = cluster - action = ca.ClusterAction(cluster.id, 'CLUSTER_ACTION', self.ctx) - action.inputs = {'enabled': True} - - # do it - res_code, res_msg = action.do_update_policy() - - self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Policy not specified.', res_msg) diff --git a/senlin/tests/unit/engine/actions/test_wait.py b/senlin/tests/unit/engine/actions/test_wait.py deleted file mode 100644 index 053a3646d..000000000 --- a/senlin/tests/unit/engine/actions/test_wait.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import eventlet -from unittest import mock - -from senlin.engine.actions import base as ab -from senlin.engine.actions import cluster_action as ca -from senlin.engine import cluster as cm -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class ClusterActionWaitTest(base.SenlinTestCase): - scenarios = [ - ('wait_ready', dict( - statuses=[ - ab.Action.WAITING, - ab.Action.READY - ], - cancelled=[False, False], - timeout=[False, False], - failed=[False, False], - code=ab.Action.RES_OK, - rescheduled_times=1, - message='All dependents ended with success') - ), - ('wait_fail', dict( - statuses=[ - ab.Action.WAITING, - ab.Action.FAILED - ], - cancelled=[False, False], - timeout=[False, False], - code=ab.Action.RES_ERROR, - rescheduled_times=1, - message='ACTION [FAKE_ID] failed') - ), - ('wait_wait_cancel', dict( - statuses=[ - ab.Action.WAITING, - ab.Action.WAITING, - ab.Action.WAITING, - ], - cancelled=[False, False, True], - timeout=[False, False, False], - code=ab.Action.RES_CANCEL, - rescheduled_times=2, - message='ACTION [FAKE_ID] cancelled') - ), - ('wait_wait_timeout', dict( - statuses=[ - ab.Action.WAITING, - ab.Action.WAITING, - ab.Action.WAITING, - ], - cancelled=[False, False, False], - timeout=[False, False, True], - code=ab.Action.RES_TIMEOUT, - rescheduled_times=2, - message='ACTION [FAKE_ID] timeout') - ), - - ] - - def setUp(self): - super(ClusterActionWaitTest, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(cm.Cluster, 'load') - @mock.patch.object(eventlet, 'sleep') - def test_wait_dependents(self, mock_reschedule, mock_load): - action = ca.ClusterAction('ID', 'ACTION', self.ctx) - action.id = 'FAKE_ID' - self.patchobject(action, 'get_status', side_effect=self.statuses) - self.patchobject(action, 'is_cancelled', side_effect=self.cancelled) - self.patchobject(action, 'is_timeout', side_effect=self.timeout) - - res_code, res_msg = action._wait_for_dependents() - self.assertEqual(self.code, res_code) - self.assertEqual(self.message, res_msg) - self.assertEqual(self.rescheduled_times, mock_reschedule.call_count) diff --git a/senlin/tests/unit/engine/notifications/__init__.py b/senlin/tests/unit/engine/notifications/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/engine/notifications/test_heat_endpoint.py b/senlin/tests/unit/engine/notifications/test_heat_endpoint.py deleted file mode 100644 index dab335ee2..000000000 --- a/senlin/tests/unit/engine/notifications/test_heat_endpoint.py +++ /dev/null @@ -1,230 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from senlin.common import context -from senlin.engine.notifications import heat_endpoint -from senlin import objects -from senlin.tests.unit.common import base - - -@mock.patch('oslo_messaging.NotificationFilter') -class TestHeatNotificationEndpoint(base.SenlinTestCase): - @mock.patch('senlin.rpc.client.get_engine_client') - def test_init(self, mock_rpc, mock_filter): - x_filter = mock_filter.return_value - event_map = { - 'orchestration.stack.delete.end': 'DELETE', - } - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - - mock_filter.assert_called_once_with( - publisher_id='^orchestration.*', - event_type=r'^orchestration\.stack\..*', - context={'project_id': '^PROJECT$'}) - mock_rpc.assert_called_once_with() - self.assertEqual(x_filter, endpoint.filter_rule) - self.assertEqual(mock_rpc.return_value, endpoint.rpc) - for e in event_map: - self.assertIn(e, endpoint.STACK_FAILURE_EVENTS) - self.assertEqual(event_map[e], endpoint.STACK_FAILURE_EVENTS[e]) - self.assertEqual('PROJECT', endpoint.project_id) - self.assertEqual('CLUSTER_ID', endpoint.cluster_id) - - @mock.patch.object(context.RequestContext, 'from_dict') - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info(self, mock_rpc, mock_context, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = { - 'tags': { - 'cluster_id=CLUSTER_ID', - 'cluster_node_id=FAKE_NODE', - 'cluster_node_index=123', - }, - 'stack_identity': 'PHYSICAL_ID', - 'user_identity': 'USER', - 'state': 'DELETE_COMPLETE', - } - metadata = {'timestamp': 'TIMESTAMP'} - call_ctx = mock.Mock() - mock_context.return_value = call_ctx - - res = endpoint.info(ctx, 'PUBLISHER', 'orchestration.stack.delete.end', - payload, metadata) - - self.assertIsNone(res) - x_rpc.call.assert_called_once_with(call_ctx, 'node_recover', mock.ANY) - req = x_rpc.call.call_args[0][2] - self.assertIsInstance(req, objects.NodeRecoverRequest) - self.assertEqual('FAKE_NODE', req.identity) - expected_params = { - 'event': 'DELETE', - 'state': 'DELETE_COMPLETE', - 'stack_id': 'PHYSICAL_ID', - 'timestamp': 'TIMESTAMP', - 'publisher': 'PUBLISHER', - 'operation': 'REBUILD', - } - self.assertEqual(expected_params, req.params) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_event_type_not_interested(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'tags': {'cluster_id': 'CLUSTER_ID'}} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', - 'orchestration.stack.create.start', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.node_recover.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_no_tag(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'tags': None} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'orchestration.stack.delete.end', - payload, metadata) - - self.assertIsNone(res) - 
self.assertEqual(0, x_rpc.node_recover.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_empty_tag(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'tags': []} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'orchestration.stack.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.node_recover.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_no_cluster_in_tag(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'tags': ['foo', 'bar']} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'orchestration.stack.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.node_recover.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_no_node_in_tag(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'tags': ['cluster_id=C1ID']} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'orchestration.stack.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.node_recover.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_cluster_id_not_match(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = { - 'tags': ['cluster_id=FOOBAR', 'cluster_node_id=N2'], - 'user_identity': 'USER', - } - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'orchestration.stack.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.node_recover.call_count) - - @mock.patch.object(context.RequestContext, 'from_dict') - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_default_values(self, mock_rpc, mock_context, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = heat_endpoint.HeatNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = { - 'tags': [ - 'cluster_id=CLUSTER_ID', - 'cluster_node_id=NODE_ID' - ], - 'user_identity': 'USER', - } - metadata = {'timestamp': 'TIMESTAMP'} - call_ctx = mock.Mock() - mock_context.return_value = call_ctx - - res = endpoint.info(ctx, 'PUBLISHER', 'orchestration.stack.delete.end', - payload, metadata) - - self.assertIsNone(res) - x_rpc.call.assert_called_once_with(call_ctx, 'node_recover', mock.ANY) - req = x_rpc.call.call_args[0][2] - self.assertIsInstance(req, objects.NodeRecoverRequest) - self.assertEqual('NODE_ID', req.identity) - expected_params = { - 'event': 'DELETE', - 'state': 'Unknown', - 'stack_id': 'Unknown', - 'timestamp': 'TIMESTAMP', - 'publisher': 'PUBLISHER', - 'operation': 'REBUILD', - } - self.assertEqual(expected_params, req.params) diff --git 
a/senlin/tests/unit/engine/notifications/test_message.py b/senlin/tests/unit/engine/notifications/test_message.py deleted file mode 100644 index 239e0233e..000000000 --- a/senlin/tests/unit/engine/notifications/test_message.py +++ /dev/null @@ -1,149 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from unittest import mock - -from oslo_config import cfg - -from senlin.common import exception -from senlin.drivers import base as driver_base -from senlin.engine.notifications import message as mmod -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -UUID = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d' - - -class TestMessage(base.SenlinTestCase): - def setUp(self): - super(TestMessage, self).setUp() - self.context = utils.dummy_context() - - @mock.patch.object(driver_base, 'SenlinDriver') - def test_zaqar_client(self, mock_senlindriver): - sd = mock.Mock() - zc = mock.Mock() - sd.message.return_value = zc - mock_senlindriver.return_value = sd - - message = mmod.Message('myqueue', user='user1', - project='project1') - - # cached will be returned - message._zaqarclient = zc - self.assertEqual(zc, message.zaqar()) - - # new zaqar client created if no cache found - message._zaqarclient = None - params = mock.Mock() - mock_param = self.patchobject(mmod.Message, '_build_conn_params', - return_value=params) - res = message.zaqar() - self.assertEqual(zc, res) - self.assertEqual(zc, message._zaqarclient) - mock_param.assert_called_once_with('user1', 'project1') - sd.message.assert_called_once_with(params) - - @mock.patch.object(mmod.Message, 'zaqar') - def test_post_lifecycle_hook_message(self, mock_zaqar): - cfg.CONF.set_override('max_message_size', 8192, 'notification') - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - queue_name = 'my_queue' - message = mmod.Message(queue_name) - mock_zc.queue_exists.return_value = True - - lifecycle_action_token = 'ACTION_ID' - node_id = 'NODE_ID' - resource_id = 'RESOURCE_ID' - lifecycle_transition_type = 'TYPE' - - message.post_lifecycle_hook_message(lifecycle_action_token, node_id, - resource_id, - lifecycle_transition_type) - - mock_zc.queue_create.assert_not_called() - - message_list = [{ - "ttl": 300, - "body": { - "lifecycle_action_token": lifecycle_action_token, - "node_id": node_id, - "resource_id": resource_id, - "lifecycle_transition_type": lifecycle_transition_type - } - }] - mock_zc.message_post.assert_called_once_with(queue_name, message_list) - - @mock.patch.object(mmod.Message, 'zaqar') - def test_post_lifecycle_hook_message_queue_nonexistent(self, mock_zaqar): - cfg.CONF.set_override('max_message_size', 8192, 'notification') - cfg.CONF.set_override('ttl', 500, 'notification') - - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - queue_name = 'my_queue' - message = mmod.Message(queue_name) - kwargs = { - '_max_messages_post_size': 8192, - 'description': "Senlin lifecycle hook notification", - 'name': queue_name - } - mock_zc.queue_exists.return_value = False - - lifecycle_action_token = 
'ACTION_ID' - node_id = 'NODE_ID' - resource_id = 'RESOURCE_ID' - lifecycle_transition_type = 'TYPE' - - message.post_lifecycle_hook_message(lifecycle_action_token, node_id, - resource_id, - lifecycle_transition_type) - - mock_zc.queue_create.assert_called_once_with(**kwargs) - - message_list = [{ - "ttl": 500, - "body": { - "lifecycle_action_token": lifecycle_action_token, - "node_id": node_id, - "resource_id": resource_id, - "lifecycle_transition_type": lifecycle_transition_type - } - }] - mock_zc.message_post.assert_called_once_with(queue_name, message_list) - - @mock.patch.object(mmod.Message, 'zaqar') - def test_post_lifecycle_hook_message_queue_retry(self, mock_zaqar): - cfg.CONF.set_override('max_message_size', 8192, 'notification') - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - queue_name = 'my_queue' - message = mmod.Message(queue_name) - mock_zc.queue_exists.return_value = True - test_exception = exception.EResourceCreation(type='queue', - message="test") - mock_zc.message_post.side_effect = [ - test_exception, test_exception, None] - - lifecycle_action_token = 'ACTION_ID' - node_id = 'NODE_ID' - resource_id = 'RESOURCE_ID' - lifecycle_transition_type = 'TYPE' - - message.post_lifecycle_hook_message(lifecycle_action_token, node_id, - resource_id, - lifecycle_transition_type) - - mock_zc.queue_create.assert_not_called() - self.assertEqual(3, mock_zc.message_post.call_count) diff --git a/senlin/tests/unit/engine/notifications/test_nova_endpoint.py b/senlin/tests/unit/engine/notifications/test_nova_endpoint.py deleted file mode 100644 index 4aef09701..000000000 --- a/senlin/tests/unit/engine/notifications/test_nova_endpoint.py +++ /dev/null @@ -1,213 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from senlin.common import context -from senlin.engine.notifications import nova_endpoint -from senlin import objects -from senlin.tests.unit.common import base - - -@mock.patch('oslo_messaging.NotificationFilter') -class TestNovaNotificationEndpoint(base.SenlinTestCase): - @mock.patch('senlin.rpc.client.get_engine_client') - def test_init(self, mock_rpc, mock_filter): - x_filter = mock_filter.return_value - event_map = { - 'compute.instance.pause.end': 'PAUSE', - 'compute.instance.power_off.end': 'POWER_OFF', - 'compute.instance.rebuild.error': 'REBUILD', - 'compute.instance.shutdown.end': 'SHUTDOWN', - 'compute.instance.soft_delete.end': 'SOFT_DELETE', - } - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - - mock_filter.assert_called_once_with( - publisher_id='^compute.*', - event_type=r'^compute\.instance\..*', - context={'project_id': '^PROJECT$'}) - mock_rpc.assert_called_once_with() - self.assertEqual(x_filter, endpoint.filter_rule) - self.assertEqual(mock_rpc.return_value, endpoint.rpc) - for e in event_map: - self.assertIn(e, endpoint.VM_FAILURE_EVENTS) - self.assertEqual(event_map[e], endpoint.VM_FAILURE_EVENTS[e]) - self.assertEqual('PROJECT', endpoint.project_id) - self.assertEqual('CLUSTER_ID', endpoint.cluster_id) - - @mock.patch.object(context.RequestContext, 'from_dict') - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info(self, mock_rpc, mock_context, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = { - 'metadata': { - 'cluster_id': 'CLUSTER_ID', - 'cluster_node_id': 'FAKE_NODE', - 'cluster_node_index': '123', - }, - 'instance_id': 'PHYSICAL_ID', - 'user_id': 'USER', - 'state': 'shutoff', - } - metadata = {'timestamp': 'TIMESTAMP'} - call_ctx = mock.Mock() - mock_context.return_value = call_ctx - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.shutdown.end', - payload, metadata) - - self.assertIsNone(res) - x_rpc.call.assert_called_once_with(call_ctx, 'node_recover', mock.ANY) - req = x_rpc.call.call_args[0][2] - self.assertIsInstance(req, objects.NodeRecoverRequest) - self.assertEqual('FAKE_NODE', req.identity) - expected_params = { - 'event': 'SHUTDOWN', - 'state': 'shutoff', - 'instance_id': 'PHYSICAL_ID', - 'timestamp': 'TIMESTAMP', - 'publisher': 'PUBLISHER', - 'operation': 'REBUILD' - } - self.assertEqual(expected_params, req.params) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_no_metadata(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'metadata': {}} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.node_recover.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_no_cluster_in_metadata(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'metadata': {'foo': 'bar'}} - metadata = 
{'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.call.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_cluster_id_not_match(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'metadata': {'cluster_id': 'FOOBAR'}} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.call.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_event_type_not_interested(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'metadata': {'cluster_id': 'CLUSTER_ID'}} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.start', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.call.call_count) - - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_no_node_id(self, mock_rpc, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'metadata': {'cluster_id': 'CLUSTER_ID'}} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.call.call_count) - - @mock.patch.object(context.RequestContext, 'from_dict') - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_default_values(self, mock_rpc, mock_context, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = { - 'metadata': { - 'cluster_id': 'CLUSTER_ID', - 'cluster_node_id': 'NODE_ID' - }, - 'user_id': 'USER', - } - metadata = {'timestamp': 'TIMESTAMP'} - call_ctx = mock.Mock() - mock_context.return_value = call_ctx - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.shutdown.end', - payload, metadata) - - self.assertIsNone(res) - x_rpc.call.assert_called_once_with(call_ctx, 'node_recover', mock.ANY) - req = x_rpc.call.call_args[0][2] - self.assertIsInstance(req, objects.NodeRecoverRequest) - self.assertEqual('NODE_ID', req.identity) - expected_params = { - 'event': 'SHUTDOWN', - 'state': 'Unknown', - 'instance_id': 'Unknown', - 'timestamp': 'TIMESTAMP', - 'publisher': 'PUBLISHER', - 'operation': 'REBUILD', - } - self.assertEqual(expected_params, req.params) diff --git a/senlin/tests/unit/engine/receivers/__init__.py b/senlin/tests/unit/engine/receivers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/engine/receivers/test_message.py b/senlin/tests/unit/engine/receivers/test_message.py deleted file mode 100644 index e37c13513..000000000 --- a/senlin/tests/unit/engine/receivers/test_message.py +++ /dev/null @@ -1,710 +0,0 @@ -# Licensed
under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import socket -from unittest import mock - -from keystoneauth1 import loading as ks_loading -from oslo_config import cfg -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.common import exception -from senlin.common.i18n import _ -from senlin.drivers import base as driver_base -from senlin.engine.actions import base as action_mod -from senlin.engine import dispatcher -from senlin.engine.receivers import message as mmod -from senlin.objects import cluster as co -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -UUID = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d' - - -class TestMessage(base.SenlinTestCase): - def setUp(self): - super(TestMessage, self).setUp() - self.context = utils.dummy_context() - - @mock.patch.object(driver_base, 'SenlinDriver') - def test_keystone_client(self, mock_senlindriver): - sd = mock.Mock() - kc = mock.Mock() - sd.identity.return_value = kc - mock_senlindriver.return_value = sd - - message = mmod.Message('message', None, None, user='user1', - project='project1') - - # cached will be returned - message._keystoneclient = kc - self.assertEqual(kc, message.keystone()) - - # new keystone client created if no cache found - message._keystoneclient = None - params = mock.Mock() - mock_param = self.patchobject(mmod.Message, '_build_conn_params', - return_value=params) - res = message.keystone() - self.assertEqual(kc, res) - self.assertEqual(kc, message._keystoneclient) - mock_param.assert_called_once_with('user1', 'project1') - sd.identity.assert_called_once_with(params) - - @mock.patch.object(driver_base, 'SenlinDriver') - def test_zaqar_client(self, mock_senlindriver): - sd = mock.Mock() - zc = mock.Mock() - sd.message.return_value = zc - mock_senlindriver.return_value = sd - - message = mmod.Message('message', None, None, user='user1', - project='project1') - - # cached will be returned - message._zaqarclient = zc - self.assertEqual(zc, message.zaqar()) - - # new zaqar client created if no cache found - message._zaqarclient = None - params = mock.Mock() - mock_param = self.patchobject(mmod.Message, '_build_conn_params', - return_value=params) - res = message.zaqar() - self.assertEqual(zc, res) - self.assertEqual(zc, message._zaqarclient) - mock_param.assert_called_once_with('user1', 'project1') - sd.message.assert_called_once_with(params) - - def test_generate_subscriber_url_host_provided(self): - cfg.CONF.set_override('host', 'web.com', 'receiver') - cfg.CONF.set_override('port', '1234', 'receiver') - message = mmod.Message('message', None, None, id=UUID) - res = message._generate_subscriber_url() - - expected = 'trust+http://web.com:1234/v1/receivers/%s/notify' % UUID - self.assertEqual(expected, res) - - @mock.patch.object(mmod.Message, '_get_base_url') - def test_generate_subscriber_url_host_not_provided( - self, mock_get_base_url): - mock_get_base_url.return_value = 'http://web.com:1234/v1' - message = mmod.Message('message', None, None, id=UUID) - res = 
message._generate_subscriber_url() - - expected = 'trust+http://web.com:1234/v1/receivers/%s/notify' % UUID - self.assertEqual(expected, res) - - @mock.patch.object(socket, 'gethostname') - @mock.patch.object(mmod.Message, '_get_base_url') - def test_generate_subscriber_url_no_host_no_base( - self, mock_get_base_url, mock_gethostname): - mock_get_base_url.return_value = None - mock_gethostname.return_value = 'test-host' - message = mmod.Message('message', None, None, id=UUID) - res = message._generate_subscriber_url() - - expected = 'trust+http://test-host:8777/v1/receivers/%s/notify' % UUID - self.assertEqual(expected, res) - - def test_to_dict(self): - message = mmod.Message('message', None, None, user='user1', - project='project1', id=UUID) - message.channel = {'queue_name': 'test-queue', - 'subscription': 'subscription-id'} - res = message.to_dict() - expected_res = { - 'name': None, - 'id': UUID, - 'user': 'user1', - 'project': 'project1', - 'domain': '', - 'type': 'message', - 'channel': {'queue_name': 'test-queue'}, - 'action': None, - 'cluster_id': None, - 'actor': {}, - 'params': {}, - 'created_at': None, - 'updated_at': None, - } - self.assertEqual(expected_res, res) - - @mock.patch.object(mmod.Message, '_create_queue') - @mock.patch.object(mmod.Message, '_create_subscription') - def test_initialize_channel(self, mock_create_subscription, - mock_create_queue): - mock_sub = mock.Mock() - mock_sub.subscription_id = 'test-subscription-id' - mock_create_subscription.return_value = mock_sub - mock_create_queue.return_value = 'test-queue' - - message = mmod.Message('message', None, None) - res = message.initialize_channel(self.context) - - expected_channel = {'queue_name': 'test-queue', - 'subscription': 'test-subscription-id'} - self.assertEqual(expected_channel, res) - mock_create_queue.assert_called_once_with() - mock_create_subscription.assert_called_once_with('test-queue') - - @mock.patch.object(mmod.Message, 'zaqar') - def test_create_queue(self, mock_zaqar): - cfg.CONF.set_override('max_message_size', 8192, 'receiver') - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - message = mmod.Message('message', None, None, id=UUID) - queue_name = 'senlin-receiver-%s' % message.id - kwargs = { - '_max_messages_post_size': 8192, - 'description': 'Senlin receiver %s.' % message.id, - 'name': queue_name - } - mock_zc.queue_create.return_value = queue_name - res = message._create_queue() - - self.assertEqual(queue_name, res) - mock_zc.queue_create.assert_called_once_with(**kwargs) - - @mock.patch.object(mmod.Message, 'zaqar') - def test_create_queue_fail(self, mock_zaqar): - cfg.CONF.set_override('max_message_size', 8192, 'receiver') - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - message = mmod.Message('message', None, None, id=UUID) - queue_name = 'senlin-receiver-%s' % message.id - kwargs = { - '_max_messages_post_size': 8192, - 'description': 'Senlin receiver %s.' 
% message.id, - 'name': queue_name - } - mock_zc.queue_create.side_effect = exception.InternalError() - self.assertRaises(exception.EResourceCreation, message._create_queue) - mock_zc.queue_create.assert_called_once_with(**kwargs) - - @mock.patch.object(mmod.Message, '_generate_subscriber_url') - @mock.patch.object(mmod.Message, '_build_trust') - @mock.patch.object(mmod.Message, 'zaqar') - def test_create_subscription(self, mock_zaqar, mock_build_trust, - mock_generate_subscriber_url): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - mock_build_trust.return_value = '123abc' - subscriber = 'subscriber_url' - mock_generate_subscriber_url.return_value = subscriber - message = mmod.Message('message', None, None, id=UUID) - queue_name = 'test-queue' - kwargs = { - "ttl": 2 ** 36, - "subscriber": subscriber, - "options": { - "trust_id": "123abc" - } - } - mock_zc.subscription_create.return_value = 'subscription' - res = message._create_subscription(queue_name) - - self.assertEqual('subscription', res) - mock_generate_subscriber_url.assert_called_once_with() - mock_zc.subscription_create.assert_called_once_with(queue_name, - **kwargs) - - @mock.patch.object(mmod.Message, '_generate_subscriber_url') - @mock.patch.object(mmod.Message, '_build_trust') - @mock.patch.object(mmod.Message, 'zaqar') - def test_create_subscription_fail(self, mock_zaqar, mock_build_trust, - mock_generate_subscriber_url): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - mock_build_trust.return_value = '123abc' - subscriber = 'subscriber_url' - mock_generate_subscriber_url.return_value = subscriber - message = mmod.Message('message', None, None, id=UUID) - message.id = UUID - queue_name = 'test-queue' - kwargs = { - "ttl": 2 ** 36, - "subscriber": subscriber, - "options": { - "trust_id": "123abc" - } - } - - mock_zc.subscription_create.side_effect = exception.InternalError() - self.assertRaises(exception.EResourceCreation, - message._create_subscription, queue_name) - mock_generate_subscriber_url.assert_called_once_with() - mock_zc.subscription_create.assert_called_once_with(queue_name, - **kwargs) - - @mock.patch.object(mmod.Message, 'zaqar') - def test_release_channel(self, mock_zaqar): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - channel = {'queue_name': 'test-queue', - 'subscription': 'test-subscription-id'} - message = mmod.Message('message', None, None, id=UUID, - channel=channel) - - message.release_channel(self.context) - mock_zc.subscription_delete.assert_called_once_with( - 'test-queue', 'test-subscription-id') - mock_zc.queue_delete.assert_called_once_with('test-queue') - - @mock.patch.object(mmod.Message, 'zaqar') - def test_release_channel_subscription_delete_fail(self, mock_zaqar): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - channel = {'queue_name': 'test-queue', - 'subscription': 'test-subscription-id'} - message = mmod.Message('message', None, None, id=UUID, - channel=channel) - mock_zc.subscription_delete.side_effect = exception.InternalError() - - self.assertRaises(exception.EResourceDeletion, - message.release_channel, self.context) - mock_zc.subscription_delete.assert_called_once_with( - 'test-queue', 'test-subscription-id') - - @mock.patch.object(mmod.Message, 'zaqar') - def test_release_channel_queue_delete_fail(self, mock_zaqar): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - channel = {'queue_name': 'test-queue', - 'subscription': 'test-subscription-id'} - message = mmod.Message('message', None, None, id=UUID, - 
channel=channel) - mock_zc.queue_delete.side_effect = exception.InternalError() - - self.assertRaises(exception.EResourceDeletion, - message.release_channel, self.context) - mock_zc.subscription_delete.assert_called_once_with( - 'test-queue', 'test-subscription-id') - mock_zc.queue_delete.assert_called_once_with('test-queue') - - @mock.patch.object(ks_loading, 'load_auth_from_conf_options') - @mock.patch.object(ks_loading, 'load_session_from_conf_options') - @mock.patch.object(mmod.Message, 'keystone') - def test_build_trust_exists(self, mock_keystone, mock_load_session, - mock_load_auth): - mock_auth = mock.Mock() - mock_session = mock.Mock() - mock_session.get_user_id.return_value = 'zaqar-trustee-user-id' - mock_load_session.return_value = mock_session - mock_load_auth.return_value = mock_auth - mock_kc = mock.Mock() - mock_keystone.return_value = mock_kc - mock_trust = mock.Mock() - mock_trust.id = 'mock-trust-id' - message = mmod.Message('message', None, None, id=UUID, - user='user1', project='project1', - params={'notifier_roles': ['test-role']}) - mock_kc.trust_get_by_trustor.return_value = mock_trust - - res = message._build_trust() - - self.assertEqual('mock-trust-id', res) - mock_kc.trust_get_by_trustor.assert_called_once_with( - 'user1', 'zaqar-trustee-user-id', 'project1') - mock_load_auth.assert_called_once_with(cfg.CONF, 'zaqar') - mock_load_session.assert_called_once_with(cfg.CONF, 'zaqar') - mock_session.get_user_id.assert_called_once_with(auth=mock_auth) - - @mock.patch.object(ks_loading, 'load_auth_from_conf_options') - @mock.patch.object(ks_loading, 'load_session_from_conf_options') - @mock.patch.object(mmod.Message, 'keystone') - def test_build_trust_create_new_multiroles( - self, mock_keystone, mock_load_session, mock_load_auth): - mock_auth = mock.Mock() - mock_session = mock.Mock() - mock_session.get_user_id.return_value = 'zaqar-trustee-user-id' - mock_load_session.return_value = mock_session - mock_load_auth.return_value = mock_auth - mock_kc = mock.Mock() - mock_keystone.return_value = mock_kc - mock_trust = mock.Mock() - mock_trust.id = 'mock-trust-id' - message = mmod.Message('message', None, None, id=UUID, - user='user1', project='project1') - message.notifier_roles = ['test_role'] - mock_kc.trust_get_by_trustor.return_value = None - mock_kc.trust_create.return_value = mock_trust - - res = message._build_trust() - - self.assertEqual('mock-trust-id', res) - mock_kc.trust_get_by_trustor.assert_called_once_with( - 'user1', 'zaqar-trustee-user-id', 'project1') - mock_kc.trust_create.assert_called_once_with( - 'user1', 'zaqar-trustee-user-id', 'project1', ['test_role']) - - @mock.patch.object(ks_loading, 'load_auth_from_conf_options') - @mock.patch.object(ks_loading, 'load_session_from_conf_options') - @mock.patch.object(mmod.Message, 'keystone') - def test_build_trust_create_new_single_admin_role( - self, mock_keystone, mock_load_session, mock_load_auth): - mock_auth = mock.Mock() - mock_session = mock.Mock() - mock_session.get_user_id.return_value = 'zaqar-trustee-user-id' - mock_load_session.return_value = mock_session - mock_load_auth.return_value = mock_auth - mock_kc = mock.Mock() - mock_keystone.return_value = mock_kc - mock_trust = mock.Mock() - mock_trust.id = 'mock-trust-id' - message = mmod.Message('message', None, None, id=UUID, - user='user1', project='project1') - message.notifier_roles = ['admin'] - mock_kc.trust_get_by_trustor.return_value = None - mock_kc.trust_create.return_value = mock_trust - - res = message._build_trust() - - 
self.assertEqual('mock-trust-id', res) - mock_kc.trust_get_by_trustor.assert_called_once_with( - 'user1', 'zaqar-trustee-user-id', 'project1') - mock_kc.trust_create.assert_called_once_with( - 'user1', 'zaqar-trustee-user-id', 'project1', ['admin']) - - @mock.patch.object(ks_loading, 'load_auth_from_conf_options') - @mock.patch.object(ks_loading, 'load_session_from_conf_options') - @mock.patch.object(mmod.Message, 'keystone') - def test_build_trust_create_new_trust_failed(self, mock_keystone, - mock_load_session, - mock_load_auth): - mock_auth = mock.Mock() - mock_session = mock.Mock() - mock_session.get_user_id.return_value = 'zaqar-trustee-user-id' - mock_load_session.return_value = mock_session - mock_load_auth.return_value = mock_auth - mock_kc = mock.Mock() - mock_keystone.return_value = mock_kc - mock_trust = mock.Mock() - mock_trust.id = 'mock-trust-id' - message = mmod.Message('message', None, None, id=UUID, - user='user1', project='project1') - message.notifier_roles = ['test_role'] - mock_kc.trust_get_by_trustor.return_value = None - mock_kc.trust_create.side_effect = exception.InternalError() - - self.assertRaises(exception.EResourceCreation, - message._build_trust) - - mock_kc.trust_get_by_trustor.assert_called_once_with( - 'user1', 'zaqar-trustee-user-id', 'project1') - mock_kc.trust_create.assert_called_once_with( - 'user1', 'zaqar-trustee-user-id', 'project1', ['test_role']) - - @mock.patch.object(ks_loading, 'load_auth_from_conf_options') - @mock.patch.object(ks_loading, 'load_session_from_conf_options') - @mock.patch.object(mmod.Message, 'keystone') - def test_build_trust_get_trust_exception(self, mock_keystone, - mock_load_session, - mock_load_auth): - mock_auth = mock.Mock() - mock_session = mock.Mock() - mock_session.get_user_id.return_value = 'zaqar-trustee-user-id' - mock_load_session.return_value = mock_session - mock_load_auth.return_value = mock_auth - mock_kc = mock.Mock() - mock_keystone.return_value = mock_kc - mock_trust = mock.Mock() - mock_trust.id = 'mock-trust-id' - message = mmod.Message('message', None, None, id=UUID, - user='user1', project='project1') - mock_kc.trust_get_by_trustor.side_effect = exception.InternalError() - - self.assertRaises(exception.EResourceCreation, - message._build_trust) - - mock_kc.trust_get_by_trustor.assert_called_once_with( - 'user1', 'zaqar-trustee-user-id', 'project1') - - @mock.patch.object(co.Cluster, 'get') - def test_find_cluster_by_uuid(self, mock_get): - x_cluster = mock.Mock() - mock_get.return_value = x_cluster - - aid = uuidutils.generate_uuid() - message = mmod.Message('message', None, None, id=UUID) - result = message._find_cluster(self.context, aid) - - self.assertEqual(x_cluster, result) - mock_get.assert_called_once_with(self.context, aid) - - @mock.patch.object(co.Cluster, 'get_by_name') - @mock.patch.object(co.Cluster, 'get') - def test_find_cluster_by_uuid_as_name(self, mock_get, mock_get_name): - x_cluster = mock.Mock() - mock_get_name.return_value = x_cluster - mock_get.return_value = None - - aid = uuidutils.generate_uuid() - message = mmod.Message('message', None, None, id=UUID) - result = message._find_cluster(self.context, aid) - - self.assertEqual(x_cluster, result) - mock_get.assert_called_once_with(self.context, aid) - mock_get_name.assert_called_once_with(self.context, aid) - - @mock.patch.object(co.Cluster, 'get_by_name') - def test_find_cluster_by_name(self, mock_get_name): - x_cluster = mock.Mock() - mock_get_name.return_value = x_cluster - - aid = 'this-is-not-uuid' - message = 
mmod.Message('message', None, None, id=UUID) - result = message._find_cluster(self.context, aid) - - self.assertEqual(x_cluster, result) - mock_get_name.assert_called_once_with(self.context, aid) - - @mock.patch.object(co.Cluster, 'get_by_short_id') - @mock.patch.object(co.Cluster, 'get_by_name') - def test_find_cluster_by_shortid(self, mock_get_name, mock_get_shortid): - x_cluster = mock.Mock() - mock_get_shortid.return_value = x_cluster - mock_get_name.return_value = None - - aid = 'abcd-1234-abcd' - message = mmod.Message('message', None, None, id=UUID) - result = message._find_cluster(self.context, aid) - - self.assertEqual(x_cluster, result) - mock_get_name.assert_called_once_with(self.context, aid) - mock_get_shortid.assert_called_once_with(self.context, aid) - - @mock.patch.object(co.Cluster, 'get_by_name') - def test_find_cluster_not_found(self, mock_get_name): - mock_get_name.return_value = None - - message = mmod.Message('message', None, None, id=UUID) - self.assertRaises(exception.ResourceNotFound, message._find_cluster, - self.context, 'bogus') - - mock_get_name.assert_called_once_with(self.context, 'bogus') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(mmod.Message, '_build_action') - @mock.patch.object(mmod.Message, 'zaqar') - def test_notify(self, mock_zaqar, mock_build_action, mock_start_action): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - mock_claim = mock.Mock() - mock_claim.id = 'claim_id' - message1 = { - 'body': {'cluster': 'c1', 'action': 'CLUSTER_SCALE_IN'}, - 'id': 'ID1' - } - message2 = { - 'body': {'cluster': 'c2', 'action': 'CLUSTER_SCALE_OUT'}, - 'id': 'ID2' - } - mock_claim.messages = [message1, message2] - mock_zc.claim_create.return_value = mock_claim - mock_build_action.side_effect = ['action_id1', 'action_id2'] - - message = mmod.Message('message', None, None, id=UUID) - message.channel = {'queue_name': 'queue1'} - res = message.notify(self.context) - self.assertEqual(['action_id1', 'action_id2'], res) - mock_zc.claim_create.assert_called_once_with('queue1') - mock_zc.claim_delete.assert_called_once_with('queue1', 'claim_id') - mock_calls = [ - mock.call(self.context, message1), - mock.call(self.context, message2) - ] - mock_build_action.assert_has_calls(mock_calls) - mock_start_action.assert_called_once_with() - mock_calls2 = [ - mock.call('queue1', 'ID1', 'claim_id'), - mock.call('queue1', 'ID2', 'claim_id') - ] - mock_zc.message_delete.assert_has_calls(mock_calls2) - - @mock.patch.object(mmod.Message, 'zaqar') - def test_notify_no_message(self, mock_zaqar): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - mock_claim = mock.Mock() - mock_claim.messages = None - mock_zc.claim_create.return_value = mock_claim - - message = mmod.Message('message', None, None, id=UUID) - message.channel = {'queue_name': 'queue1'} - res = message.notify(self.context) - self.assertEqual([], res) - mock_zc.claim_create.assert_called_once_with('queue1') - - @mock.patch.object(dispatcher, 'start_action') - @mock.patch.object(mmod.Message, '_build_action') - @mock.patch.object(mmod.Message, 'zaqar') - def test_notify_some_actions_building_failed(self, mock_zaqar, - mock_build_action, - mock_start_action): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - mock_claim = mock.Mock() - mock_claim.id = 'claim_id' - message1 = { - 'body': {'cluster': 'c1', 'action': 'CLUSTER_SCALE_IN'}, - 'id': 'ID1' - } - message2 = { - 'body': {'cluster': 'foo', 'action': 'CLUSTER_SCALE_OUT'}, - 'id': 'ID2' - } - mock_claim.messages = 
[message1, message2] - mock_zc.claim_create.return_value = mock_claim - mock_build_action.side_effect = [exception.InternalError(), - 'action_id1'] - - message = mmod.Message('message', None, None, id=UUID) - message.channel = {'queue_name': 'queue1'} - res = message.notify(self.context) - self.assertEqual(['action_id1'], res) - mock_zc.claim_create.assert_called_once_with('queue1') - mock_calls = [ - mock.call(self.context, message1), - mock.call(self.context, message2) - ] - mock_build_action.assert_has_calls(mock_calls) - mock_start_action.assert_called_once_with() - mock_calls2 = [ - mock.call('queue1', 'ID1', 'claim_id'), - mock.call('queue1', 'ID2', 'claim_id') - ] - mock_zc.message_delete.assert_has_calls(mock_calls2) - - @mock.patch.object(mmod.Message, 'zaqar') - def test_notify_claiming_message_failed(self, mock_zaqar): - mock_zc = mock.Mock() - mock_zaqar.return_value = mock_zc - mock_zc.claim_create.side_effect = exception.InternalError() - - message = mmod.Message('message', None, None, id=UUID) - message.channel = {'queue_name': 'queue1'} - res = message.notify(self.context) - self.assertIsNone(res) - mock_zc.claim_create.assert_called_once_with('queue1') - - @mock.patch.object(action_mod.Action, 'create') - @mock.patch.object(mmod.Message, '_find_cluster') - def test_build_action(self, mock_find_cluster, mock_action_create): - fake_cluster = mock.Mock() - fake_cluster.user = 'user1' - fake_cluster.id = 'cid1' - mock_find_cluster.return_value = fake_cluster - mock_action_create.return_value = 'action_id1' - msg = { - 'body': {'cluster': 'c1', 'action': 'CLUSTER_SCALE_IN'}, - 'id': 'ID123456' - } - message = mmod.Message('message', None, None, id=UUID) - message.id = 'ID654321' - message.user = 'user1' - expected_kwargs = { - 'name': 'receiver_ID654321_ID123456', - 'cause': consts.CAUSE_RPC, - 'status': action_mod.Action.READY, - 'inputs': {} - } - - res = message._build_action(self.context, msg) - self.assertEqual('action_id1', res) - mock_find_cluster.assert_called_once_with(self.context, 'c1') - mock_action_create.assert_called_once_with(self.context, 'cid1', - 'CLUSTER_SCALE_IN', - **expected_kwargs) - - def test_build_action_message_body_empty(self): - msg = { - 'body': {}, - 'id': 'ID123456' - } - message = mmod.Message('message', None, None, id=UUID) - ex = self.assertRaises(exception.InternalError, message._build_action, - self.context, msg) - ex_msg = _('Message body is empty.') - self.assertEqual(ex_msg, ex.message) - - def test_build_action_no_cluster_in_message_body(self): - msg = { - 'body': {'action': 'CLUSTER_SCALE_IN'}, - 'id': 'ID123456' - } - message = mmod.Message('message', None, None, id=UUID) - ex = self.assertRaises(exception.InternalError, message._build_action, - self.context, msg) - ex_msg = _('Both cluster identity and action must be specified.') - self.assertEqual(ex_msg, ex.message) - - def test_build_action_no_action_in_message_body(self): - msg = { - 'body': {'cluster': 'c1'}, - 'id': 'ID123456' - } - message = mmod.Message('message', None, None, id=UUID) - ex = self.assertRaises(exception.InternalError, message._build_action, - self.context, msg) - ex_msg = _('Both cluster identity and action must be specified.') - self.assertEqual(ex_msg, ex.message) - - @mock.patch.object(mmod.Message, '_find_cluster') - def test_build_action_cluster_notfound(self, mock_find_cluster): - mock_find_cluster.side_effect = exception.ResourceNotFound( - type='cluster', id='c1') - msg = { - 'body': {'cluster': 'c1', 'action': 'CLUSTER_SCALE_IN'}, - 'id': 'ID123456' - 
} - message = mmod.Message('message', None, None, id=UUID) - ex = self.assertRaises(exception.InternalError, message._build_action, - self.context, msg) - ex_msg = _('Cluster (c1) cannot be found.') - self.assertEqual(ex_msg, ex.message) - - @mock.patch.object(mmod.Message, '_find_cluster') - def test_build_action_permission_denied(self, mock_find_cluster): - fake_cluster = mock.Mock() - fake_cluster.user = 'user1' - mock_find_cluster.return_value = fake_cluster - msg = { - 'body': {'cluster': 'c1', 'action': 'CLUSTER_SCALE_IN'}, - 'id': 'ID123456' - } - message = mmod.Message('message', None, None, id=UUID) - message.user = 'user2' - ex = self.assertRaises(exception.InternalError, message._build_action, - self.context, msg) - ex_msg = _('%(user)s is not allowed to trigger actions on ' - 'cluster %(cid)s.') % {'user': message.user, - 'cid': 'c1'} - self.assertEqual(ex_msg, ex.message) - - @mock.patch.object(mmod.Message, '_find_cluster') - def test_build_action_invalid_action_name(self, mock_find_cluster): - fake_cluster = mock.Mock() - fake_cluster.user = 'user1' - mock_find_cluster.return_value = fake_cluster - msg = { - 'body': {'cluster': 'c1', 'action': 'foo'}, - 'id': 'ID123456' - } - message = mmod.Message('message', None, None, id=UUID) - message.user = 'user1' - ex = self.assertRaises(exception.InternalError, message._build_action, - self.context, msg) - ex_msg = _("Illegal cluster action 'foo' specified.") - self.assertEqual(ex_msg, ex.message) diff --git a/senlin/tests/unit/engine/receivers/test_receiver.py b/senlin/tests/unit/engine/receivers/test_receiver.py deleted file mode 100644 index cbb180a55..000000000 --- a/senlin/tests/unit/engine/receivers/test_receiver.py +++ /dev/null @@ -1,411 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from oslo_config import cfg -from oslo_context import context as oslo_ctx -from oslo_utils import timeutils - -from senlin.common import context -from senlin.common import exception -from senlin.common import utils as common_utils -from senlin.drivers import base as driver_base -from senlin.engine.receivers import base as rb -from senlin.engine.receivers import message as rm -from senlin.engine.receivers import webhook as rw -from senlin.objects import credential as co -from senlin.objects import receiver as ro -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de' -UUID1 = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d' -UUID2 = '60efdaa1-06c2-4fcf-ae44-17a2d85ff3ea' - - -class TestReceiver(base.SenlinTestCase): - - def setUp(self): - super(TestReceiver, self).setUp() - self.context = utils.dummy_context() - self.actor = { - 'auth_url': 'TEST_URL', - 'user_id': '123', - 'password': 'abc' - } - self.params = { - 'key1': 'value1', - 'key2': 'value2', - } - - def _create_receiver(self, receiver_name, receiver_id=None): - values = { - 'id': receiver_id, - 'name': receiver_name, - 'type': 'webhook', - 'cluster_id': CLUSTER_ID, - 'action': 'test-action', - 'user': self.context.user_id, - 'project': self.context.project_id, - 'domain': self.context.domain_id, - 'created_at': timeutils.utcnow(True), - 'updated_at': None, - 'actor': self.actor, - 'params': self.params, - 'channel': None, - } - - return ro.Receiver.create(self.context, values) - - def test_receiver_init(self): - kwargs = { - 'id': UUID1, - 'name': 'test-receiver', - 'user': 'test-user', - 'project': 'test-project', - 'domain': 'test-domain', - 'created_at': timeutils.utcnow(True), - 'updated_at': None, - 'actor': self.actor, - 'params': self.params, - 'channel': {'alarm_url': 'http://url1'}, - } - - receiver = rb.Receiver('webhook', CLUSTER_ID, 'test-action', **kwargs) - - self.assertEqual(kwargs['id'], receiver.id) - self.assertEqual(kwargs['name'], receiver.name) - self.assertEqual('webhook', receiver.type) - self.assertEqual('test-action', receiver.action) - self.assertEqual(kwargs['user'], receiver.user) - self.assertEqual(kwargs['project'], receiver.project) - self.assertEqual(kwargs['domain'], receiver.domain) - - self.assertEqual(kwargs['created_at'], receiver.created_at) - self.assertEqual(kwargs['updated_at'], receiver.updated_at) - - self.assertEqual(CLUSTER_ID, receiver.cluster_id) - self.assertEqual('test-action', receiver.action) - self.assertEqual(self.actor, receiver.actor) - self.assertEqual(self.params, receiver.params) - self.assertEqual(kwargs['channel'], receiver.channel) - - def test_receiver_init_default_value(self): - receiver = rb.Receiver('webhook', CLUSTER_ID, 'test-action') - self.assertIsNone(receiver.id) - self.assertIsNone(receiver.name) - self.assertEqual('webhook', receiver.type) - self.assertEqual('', receiver.user) - self.assertEqual('', receiver.project) - self.assertEqual('', receiver.domain) - - self.assertIsNone(receiver.created_at) - self.assertIsNone(receiver.updated_at) - - self.assertEqual(CLUSTER_ID, receiver.cluster_id) - self.assertEqual('test-action', receiver.action) - self.assertEqual({}, receiver.actor) - self.assertEqual({}, receiver.params) - self.assertEqual({}, receiver.channel) - - def test_receiver_store(self): - receiver = rb.Receiver('webhook', CLUSTER_ID, 'test-action', - name='test_receiver_123456', - project=self.context.project_id) - self.assertIsNone(receiver.id) 
- - receiver_id = receiver.store(self.context) - - self.assertIsNotNone(receiver_id) - self.assertEqual(receiver_id, receiver.id) - - result = ro.Receiver.get(self.context, receiver_id) - - self.assertIsNotNone(result) - self.assertEqual(receiver_id, result.id) - self.assertEqual(receiver.type, result.type) - self.assertEqual(receiver.name, result.name) - self.assertEqual(receiver.user, result.user) - self.assertEqual(receiver.project, result.project) - self.assertEqual(receiver.domain, result.domain) - self.assertEqual(common_utils.isotime(receiver.created_at), - common_utils.isotime(result.created_at)) - self.assertEqual(receiver.updated_at, result.updated_at) - self.assertEqual(receiver.action, result.action) - self.assertEqual(receiver.actor, result.actor) - self.assertEqual(receiver.params, result.params) - self.assertEqual(receiver.channel, result.channel) - - @mock.patch.object(co.Credential, 'get') - @mock.patch.object(rw.Webhook, 'initialize_channel') - def test_receiver_create_webhook_admin(self, mock_initialize_channel, - mock_c_get): - mock_c_get.return_value = { - 'cred': {'openstack': {'trust': '123abc'}} - } - ctx = utils.dummy_context(is_admin=True) - cluster = mock.Mock() - cluster.id = CLUSTER_ID - cluster.user = 'user1' - cluster.project = 'project1' - receiver = rb.Receiver.create(ctx, 'webhook', cluster, - 'FAKE_ACTION', - name='test_receiver_2234') - - self.assertEqual(ctx.user_id, receiver.user) - self.assertEqual(ctx.project_id, receiver.project) - self.assertEqual(ctx.domain_id, receiver.domain) - self.assertEqual('123abc', receiver.actor['trust_id']) - mock_c_get.assert_called_once_with(ctx, 'user1', 'project1') - - @mock.patch.object(rw.Webhook, 'initialize_channel') - def test_receiver_create_webhook_non_admin(self, mock_initialize_channel): - ctx = utils.dummy_context(is_admin=False) - cluster = mock.Mock() - cluster.id = CLUSTER_ID - receiver = rb.Receiver.create(ctx, 'webhook', cluster, - 'FAKE_ACTION', - name='test_receiver_2234') - - self.assertEqual(ctx.user_id, receiver.user) - self.assertEqual(ctx.project_id, receiver.project) - self.assertEqual(ctx.domain_id, receiver.domain) - self.assertIsNone(receiver.actor['trust_id']) - - @mock.patch.object(rm.Message, 'initialize_channel') - def test_receiver_create_message(self, mock_initialize_channel): - receiver = rb.Receiver.create(self.context, 'message', None, - None, name='test_receiver_2234') - - self.assertEqual(self.context.user_id, receiver.user) - self.assertEqual(self.context.project_id, receiver.project) - self.assertEqual(self.context.domain_id, receiver.domain) - - def _verify_receiver(self, receiver, result): - self.assertEqual(receiver.id, result.id) - self.assertEqual(receiver.name, result.name) - self.assertEqual(receiver.type, result.type) - self.assertEqual(receiver.user, result.user) - self.assertEqual(receiver.project, result.project) - self.assertEqual(receiver.domain, result.domain) - - self.assertEqual(receiver.created_at, result.created_at) - self.assertEqual(receiver.updated_at, result.updated_at) - - self.assertEqual(receiver.cluster_id, result.cluster_id) - self.assertEqual(receiver.actor, result.actor) - self.assertEqual(receiver.action, result.action) - self.assertEqual(receiver.params, result.params) - self.assertEqual(receiver.channel, result.channel) - - def test_receiver_load_with_id(self): - receiver = self._create_receiver('receiver-1', UUID1) - result = rb.Receiver.load(self.context, receiver_id=receiver.id) - self._verify_receiver(receiver, result) - - def
test_receiver_load_with_object(self): - receiver = self._create_receiver('receiver-1', UUID1) - result = rb.Receiver.load(self.context, receiver_obj=receiver) - self._verify_receiver(receiver, result) - - def test_receiver_load_not_found(self): - ex = self.assertRaises(exception.ResourceNotFound, - rb.Receiver.load, - self.context, 'fake-receiver', None) - self.assertEqual("The receiver 'fake-receiver' could not " - "be found.", str(ex)) - - def test_receiver_load_diff_project(self): - receiver = self._create_receiver('receiver-1', UUID1) - - new_context = utils.dummy_context(project='a-different-project') - ex = self.assertRaises(exception.ResourceNotFound, - rb.Receiver.load, - new_context, UUID1, None) - self.assertEqual("The receiver '%s' could not be found." % UUID1, - str(ex)) - - res = rb.Receiver.load(new_context, receiver.id, project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(receiver.id, res.id) - - def test_receiver_to_dict(self): - receiver = self._create_receiver('test-receiver', UUID1) - self.assertIsNotNone(receiver.id) - expected = { - 'id': receiver.id, - 'name': receiver.name, - 'type': receiver.type, - 'user': receiver.user, - 'project': receiver.project, - 'domain': receiver.domain, - 'cluster_id': receiver.cluster_id, - 'action': receiver.action, - 'actor': receiver.actor, - 'params': receiver.params, - 'created_at': common_utils.isotime(receiver.created_at), - 'updated_at': common_utils.isotime(receiver.updated_at), - 'channel': None, - } - - result = rb.Receiver.load(self.context, receiver_id=receiver.id) - self.assertEqual(expected, result.to_dict()) - - def test_release_channel(self): - receiver = self._create_receiver('test-receiver', UUID1) - receiver = rb.Receiver.load(self.context, UUID1) - res = receiver.release_channel(self.context) - self.assertIsNone(res) - - def test_notify(self): - receiver = self._create_receiver('test-receiver', UUID1) - receiver = rb.Receiver.load(self.context, UUID1) - res = receiver.notify(self.context) - self.assertIsNone(res) - - @mock.patch.object(ro.Receiver, 'delete') - @mock.patch.object(rb.Receiver, 'load') - def test_receiver_delete(self, mock_load, mock_delete): - mock_receiver = mock.Mock() - mock_receiver.id = 'test-receiver-id' - mock_load.return_value = mock_receiver - - rb.Receiver.delete(self.context, 'test-receiver-id') - - mock_load.assert_called_once_with(self.context, - receiver_id='test-receiver-id') - mock_receiver.release_channel.assert_called_once_with(self.context) - mock_delete.assert_called_once_with(self.context, 'test-receiver-id') - - @mock.patch.object(context, "get_service_credentials") - @mock.patch.object(driver_base, "SenlinDriver") - def test_get_base_url_succeeded(self, mock_senlin_driver, - mock_get_service_creds): - cfg.CONF.set_override('default_region_name', 'RegionOne') - fake_driver = mock.Mock() - fake_kc = mock.Mock() - fake_cred = mock.Mock() - mock_senlin_driver.return_value = fake_driver - fake_driver.identity.return_value = fake_kc - mock_get_service_creds.return_value = fake_cred - - fake_kc.get_senlin_endpoint.return_value = "http://web.com:1234/v1" - - receiver = rb.Receiver( - 'webhook', CLUSTER_ID, 'FAKE_ACTION', - id=UUID1, params={'KEY': 884, 'FOO': 'BAR'}) - - res = receiver._get_base_url() - self.assertEqual("http://web.com:1234/v1", res) - mock_get_service_creds.assert_called_once_with() - fake_kc.get_senlin_endpoint.assert_called_once_with() - - @mock.patch.object(context, "get_service_credentials") - @mock.patch.object(driver_base, "SenlinDriver") - def 
test_get_base_url_failed_get_endpoint_exception( - self, mock_senlin_driver, mock_get_service_creds): - cfg.CONF.set_override('default_region_name', 'RegionOne') - fake_driver = mock.Mock() - fake_kc = mock.Mock() - fake_cred = mock.Mock() - mock_senlin_driver.return_value = fake_driver - fake_driver.identity.return_value = fake_kc - mock_get_service_creds.return_value = fake_cred - - fake_kc.get_senlin_endpoint.side_effect = exception.InternalError( - message='Error!') - - receiver = rb.Receiver( - 'webhook', CLUSTER_ID, 'FAKE_ACTION', - id=UUID1, params={'KEY': 884, 'FOO': 'BAR'}) - - res = receiver._get_base_url() - self.assertIsNone(res) - mock_get_service_creds.assert_called_once_with() - fake_kc.get_senlin_endpoint.assert_called_once_with() - - @mock.patch.object(co.Credential, 'get') - @mock.patch.object(context, 'get_service_credentials') - @mock.patch.object(oslo_ctx, 'get_current') - def test_build_conn_params(self, mock_get_current, mock_get_service_creds, - mock_cred_get): - user = 'user1' - project = 'project1' - service_cred = { - 'auth_url': 'AUTH_URL', - 'username': 'senlin', - 'user_domain_name': 'default', - 'password': '123', - 'project_domain_name': 'default', - 'verify': True, - 'interface': 'internal', - } - current_ctx = { - 'auth_url': 'auth_url', - 'user_name': user, - 'user_domain_name': 'default', - 'password': '456' - } - cred_info = { - 'openstack': { - 'trust': 'TRUST_ID', - } - } - - cred = mock.Mock() - cred.cred = cred_info - mock_get_service_creds.return_value = service_cred - mock_get_current.return_value = current_ctx - mock_cred_get.return_value = cred - - receiver = self._create_receiver('receiver-1', UUID1) - receiver = rb.Receiver.load(self.context, receiver_obj=receiver) - expected_result = { - 'auth_url': 'AUTH_URL', - 'username': 'senlin', - 'user_domain_name': 'default', - 'password': '123', - 'trust_id': 'TRUST_ID', - 'project_domain_name': 'default', - 'verify': True, - 'interface': 'internal', - } - res = receiver._build_conn_params(user, project) - self.assertEqual(expected_result, res) - mock_get_service_creds.assert_called_once_with() - mock_cred_get.assert_called_once_with(current_ctx, user, project) - - @mock.patch.object(co.Credential, 'get') - @mock.patch.object(context, 'get_service_credentials') - @mock.patch.object(oslo_ctx, 'get_current') - def test_build_conn_params_trust_not_found( - self, mock_get_current, mock_get_service_creds, mock_cred_get): - - user = 'user1' - project = 'project1' - service_cred = { - 'auth_url': 'AUTH_URL', - 'username': 'senlin', - 'user_domain_name': 'default', - 'password': '123' - } - - mock_get_service_creds.return_value = service_cred - mock_cred_get.return_value = None - - receiver = self._create_receiver('receiver-1', UUID1) - receiver = rb.Receiver.load(self.context, receiver_obj=receiver) - ex = self.assertRaises(exception.TrustNotFound, - receiver._build_conn_params, user, project) - msg = "The trust for trustor 'user1' could not be found." - self.assertEqual(msg, str(ex)) diff --git a/senlin/tests/unit/engine/receivers/test_webhook.py b/senlin/tests/unit/engine/receivers/test_webhook.py deleted file mode 100644 index 276f5cc6e..000000000 --- a/senlin/tests/unit/engine/receivers/test_webhook.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket -from unittest import mock - -from oslo_config import cfg - -from senlin.engine.receivers import webhook as wmod -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de' -UUID1 = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d' -UUID2 = '60efdaa1-06c2-4fcf-ae44-17a2d85ff3ea' - - -class TestWebhook(base.SenlinTestCase): - def setUp(self): - super(TestWebhook, self).setUp() - self.context = utils.dummy_context() - - def test_initialize_channel_host_provided(self): - cfg.CONF.set_override('host', 'web.com', 'receiver') - cfg.CONF.set_override('port', '1234', 'receiver') - webhook = wmod.Webhook('webhook', CLUSTER_ID, 'FAKE_ACTION', - id=UUID1) - channel = webhook.initialize_channel(self.context) - - expected = { - 'alarm_url': ('http://web.com:1234/v1/webhooks/%s/trigger' - '?V=2' % UUID1) - } - self.assertEqual(expected, channel) - self.assertEqual(expected, webhook.channel) - - @mock.patch.object(wmod.Webhook, "_get_base_url") - def test_initialize_channel_host_not_provided(self, mock_get_base_url): - mock_get_base_url.return_value = 'http://web.com:1234/v1' - webhook = wmod.Webhook('webhook', CLUSTER_ID, 'FAKE_ACTION', - id=UUID1) - channel = webhook.initialize_channel(self.context) - - expected = { - 'alarm_url': ('http://web.com:1234/v1/webhooks/%s/trigger' - '?V=2' % UUID1) - } - self.assertEqual(expected, channel) - self.assertEqual(expected, webhook.channel) - - @mock.patch.object(socket, "gethostname") - @mock.patch.object(wmod.Webhook, "_get_base_url") - def test_initialize_channel_no_host_no_base(self, mock_get_base_url, - mock_gethostname): - mock_get_base_url.return_value = None - mock_gethostname.return_value = 'test-host' - webhook = wmod.Webhook('webhook', CLUSTER_ID, 'FAKE_ACTION', - id=UUID1) - channel = webhook.initialize_channel(self.context) - - expected = { - 'alarm_url': ('http://test-host:8777/v1/webhooks/%s/trigger' - '?V=2' % UUID1) - } - self.assertEqual(expected, channel) - self.assertEqual(expected, webhook.channel) - - def test_initialize_channel_with_params(self): - cfg.CONF.set_override('host', 'web.com', 'receiver') - cfg.CONF.set_override('port', '1234', 'receiver') - webhook = wmod.Webhook( - 'webhook', CLUSTER_ID, 'FAKE_ACTION', - id=UUID1, params={'KEY': 884, 'FOO': 'BAR'}) - - channel = webhook.initialize_channel(self.context) - - expected = { - 'alarm_url': ('http://web.com:1234/v1/webhooks/%s/trigger' - '?V=2&FOO=BAR&KEY=884' % UUID1) - } - self.assertEqual(expected, channel) - self.assertEqual(expected, webhook.channel) diff --git a/senlin/tests/unit/engine/test_cluster.py b/senlin/tests/unit/engine/test_cluster.py deleted file mode 100644 index 983e70c59..000000000 --- a/senlin/tests/unit/engine/test_cluster.py +++ /dev/null @@ -1,1072 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg - -from senlin.common import consts -from senlin.common import exception -from senlin.engine import cluster as cm -from senlin.engine import cluster_policy as cpm -from senlin.engine import health_manager -from senlin.engine import node as node_mod -from senlin.objects import cluster as co -from senlin.objects import cluster_policy as cpo -from senlin.objects import node as no -from senlin.policies import base as pcb -from senlin.profiles import base as pfb -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -PROFILE_ID = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d' -CLUSTER_ID = '60efdaa1-06c2-4fcf-ae44-17a2d85ff3ea' -POLICY_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de' - - -class TestCluster(base.SenlinTestCase): - - def setUp(self): - super(TestCluster, self).setUp() - self.context = utils.dummy_context(project='cluster_test_project') - - def test_init(self): - cluster = cm.Cluster('test-cluster', 0, PROFILE_ID) - - self.assertIsNone(cluster.id) - self.assertEqual('test-cluster', cluster.name) - self.assertEqual(PROFILE_ID, cluster.profile_id) - self.assertEqual('', cluster.user) - self.assertEqual('', cluster.project) - self.assertEqual('', cluster.domain) - - self.assertIsNone(cluster.init_at) - self.assertIsNone(cluster.created_at) - self.assertIsNone(cluster.updated_at) - - self.assertEqual(0, cluster.min_size) - self.assertEqual(-1, cluster.max_size) - self.assertEqual(0, cluster.desired_capacity) - self.assertEqual(1, cluster.next_index) - self.assertEqual(cfg.CONF.default_action_timeout, cluster.timeout) - self.assertEqual('INIT', cluster.status) - self.assertEqual('Initializing', cluster.status_reason) - self.assertEqual({}, cluster.data) - self.assertEqual({}, cluster.metadata) - self.assertEqual({}, cluster.dependents) - self.assertEqual({}, cluster.config) - self.assertEqual({'profile': None, 'nodes': [], 'policies': []}, - cluster.rt) - - def test_init_with_none(self): - kwargs = { - 'min_size': None, - 'max_size': None, - 'metadata': None - } - cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, **kwargs) - self.assertEqual(0, cluster.min_size) - self.assertEqual(-1, cluster.max_size) - self.assertEqual({}, cluster.metadata) - - @mock.patch.object(cm.Cluster, '_load_runtime_data') - def test_init_with_context(self, mock_load): - cm.Cluster('test-cluster', 0, PROFILE_ID, context=self.context) - mock_load.assert_called_once_with(self.context) - - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - @mock.patch.object(pcb.Policy, 'load') - @mock.patch.object(pfb.Profile, 'load') - @mock.patch.object(no.Node, 'get_all_by_cluster') - def test_load_runtime_data(self, mock_nodes, mock_profile, mock_policy, - mock_pb): - x_binding = mock.Mock() - x_binding.policy_id = POLICY_ID - mock_pb.return_value = [x_binding] - x_policy = mock.Mock() - mock_policy.return_value = x_policy - x_profile = mock.Mock() - mock_profile.return_value = x_profile - x_node_1 = mock.Mock() - x_node_2 = mock.Mock() - mock_nodes.return_value = [x_node_1, x_node_2] - - cluster = cm.Cluster('test-cluster', 0, 
PROFILE_ID) - cluster.id = CLUSTER_ID - - cluster._load_runtime_data(self.context) - - rt = cluster.rt - self.assertEqual(x_profile, rt['profile']) - self.assertEqual([x_node_1, x_node_2], rt['nodes']) - self.assertEqual(2, len(rt['nodes'])) - self.assertIsInstance(rt['nodes'], list) - self.assertEqual([x_policy], rt['policies']) - - mock_pb.assert_called_once_with(self.context, CLUSTER_ID) - mock_policy.assert_called_once_with(self.context, - POLICY_ID, - project_safe=False) - mock_profile.assert_called_once_with(self.context, - profile_id=PROFILE_ID, - project_safe=False) - mock_nodes.assert_called_once_with(self.context, CLUSTER_ID) - - def test_load_runtime_data_id_is_none(self): - cluster = cm.Cluster('test-cluster', 0, PROFILE_ID) - - cluster._load_runtime_data(self.context) - - rt = cluster.rt - self.assertIsNone(rt['profile']) - self.assertEqual([], rt['nodes']) - self.assertEqual(0, len(rt['nodes'])) - self.assertIsInstance(rt['nodes'], list) - self.assertEqual([], rt['policies']) - - def test_store_for_create(self): - utils.create_profile(self.context, PROFILE_ID) - cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, - user=self.context.user_id, - project=self.context.project_id) - mock_load = self.patchobject(cluster, '_load_runtime_data') - self.assertIsNone(cluster.id) - - cluster_id = cluster.store(self.context) - self.assertIsNotNone(cluster_id) - mock_load.assert_called_once_with(self.context) - - result = co.Cluster.get(self.context, cluster_id=cluster_id) - - self.assertIsNotNone(result) - self.assertEqual('test-cluster', result.name) - self.assertEqual(PROFILE_ID, result.profile_id) - self.assertEqual(self.context.user_id, result.user) - self.assertEqual(self.context.project_id, result.project) - self.assertEqual(self.context.domain_id, result.domain) - - self.assertIsNotNone(result.init_at) - self.assertIsNone(result.created_at) - self.assertIsNone(result.updated_at) - - self.assertEqual(0, result.min_size) - self.assertEqual(-1, result.max_size) - self.assertEqual(0, result.desired_capacity) - self.assertEqual(1, result.next_index) - self.assertEqual(cfg.CONF.default_action_timeout, result.timeout) - self.assertEqual('INIT', result.status) - self.assertEqual('Initializing', result.status_reason) - self.assertEqual({}, result.data) - self.assertEqual({}, result.metadata) - - def test_store_for_update(self): - utils.create_profile(self.context, PROFILE_ID) - cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, - user=self.context.user_id, - project=self.context.project_id) - mock_load = self.patchobject(cluster, '_load_runtime_data') - self.assertIsNone(cluster.id) - - cluster_id = cluster.store(self.context) - - self.assertIsNotNone(cluster_id) - mock_load.assert_called_once_with(self.context) - - # do an update - cluster.name = 'test-cluster-1' - - cluster.min_size = 1 - cluster.max_size = 3 - cluster.desired_capacity = 2 - cluster.timeout = 120 - cluster.data = {'FOO': 'BAR'} - cluster.metadata = {'KEY': 'VALUE'} - cluster.config = {'KEY': 'VALUE'} - - new_id = cluster.store(self.context) - self.assertEqual(cluster_id, new_id) - - result = co.Cluster.get(self.context, cluster_id) - self.assertIsNotNone(result) - - self.assertEqual('test-cluster-1', result.name) - self.assertEqual(self.context.user_id, result.user) - self.assertEqual(self.context.project_id, result.project) - - self.assertEqual(1, result.min_size) - self.assertEqual(3, result.max_size) - self.assertEqual(2, result.desired_capacity) - - self.assertEqual(120, result.timeout) - self.assertEqual({'FOO': 
'BAR'}, result.data) - self.assertEqual({'KEY': 'VALUE'}, result.metadata) - self.assertEqual({'KEY': 'VALUE'}, result.config) - - @mock.patch.object(cm.Cluster, '_from_object') - def test_load_via_db_object(self, mock_init): - x_obj = mock.Mock() - - result = cm.Cluster.load(self.context, dbcluster=x_obj) - - self.assertEqual(mock_init.return_value, result) - mock_init.assert_called_once_with(self.context, x_obj) - - @mock.patch.object(co.Cluster, 'get') - @mock.patch.object(cm.Cluster, '_from_object') - def test_load_via_cluster_id(self, mock_init, mock_get): - x_obj = mock.Mock() - mock_get.return_value = x_obj - - result = cm.Cluster.load(self.context, cluster_id=CLUSTER_ID) - - self.assertEqual(mock_init.return_value, result) - mock_get.assert_called_once_with(self.context, CLUSTER_ID, - project_safe=True) - mock_init.assert_called_once_with(self.context, x_obj) - - @mock.patch.object(co.Cluster, 'get') - def test_load_not_found(self, mock_get): - mock_get.return_value = None - ex = self.assertRaises(exception.ResourceNotFound, - cm.Cluster.load, - self.context, cluster_id=CLUSTER_ID) - self.assertEqual("The cluster '%s' could not be found." % CLUSTER_ID, - str(ex)) - mock_get.assert_called_once_with(self.context, CLUSTER_ID, - project_safe=True) - - @mock.patch.object(cm.Cluster, '_from_object') - @mock.patch.object(co.Cluster, 'get_all') - def test_load_all(self, mock_get, mock_init): - x_obj_1 = mock.Mock() - x_obj_2 = mock.Mock() - mock_get.return_value = [x_obj_1, x_obj_2] - - x_cluster_1 = mock.Mock() - x_cluster_2 = mock.Mock() - mock_init.side_effect = [x_cluster_1, x_cluster_2] - - result = cm.Cluster.load_all(self.context) - - self.assertEqual([x_cluster_1, x_cluster_2], [c for c in result]) - mock_get.assert_called_once_with(self.context, - limit=None, marker=None, - sort=None, filters=None, - project_safe=True) - mock_init.assert_has_calls([ - mock.call(self.context, x_obj_1), - mock.call(self.context, x_obj_2)]) - - @mock.patch.object(co.Cluster, 'update') - def test_set_status_for_create(self, mock_update): - cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, - id=CLUSTER_ID, status='CREATING') - - cluster.set_status(self.context, consts.CS_ACTIVE, 'Cluster created') - - self.assertEqual(consts.CS_ACTIVE, cluster.status) - self.assertEqual('Cluster created', cluster.status_reason) - self.assertIsNotNone(cluster.created_at) - self.assertIsNone(cluster.updated_at) - - mock_update.assert_called_once_with( - self.context, CLUSTER_ID, - { - 'created_at': mock.ANY, - 'status': consts.CS_ACTIVE, - 'status_reason': 'Cluster created' - } - ) - - @mock.patch.object(co.Cluster, 'update') - def test_set_status_for_update(self, mock_update): - cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, - id=CLUSTER_ID, status='UPDATING') - - cluster.set_status(self.context, consts.CS_ACTIVE, 'Cluster updated') - - self.assertEqual(consts.CS_ACTIVE, cluster.status) - self.assertEqual('Cluster updated', cluster.status_reason) - self.assertIsNotNone(cluster.updated_at) - - @mock.patch.object(co.Cluster, 'update') - def test_set_status_for_resize(self, mock_update): - cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, - id=CLUSTER_ID, status='RESIZING') - - cluster.set_status(self.context, consts.CS_ACTIVE, 'Cluster resized') - - self.assertEqual(consts.CS_ACTIVE, cluster.status) - self.assertEqual('Cluster resized', cluster.status_reason) - self.assertIsNotNone(cluster.updated_at) - - @mock.patch.object(pfb.Profile, 'load') - @mock.patch.object(co.Cluster, 'update') - def 
-                                                mock_load):
-        x_profile = mock.Mock()
-        mock_load.return_value = x_profile
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, id=CLUSTER_ID,
-                             status='UPDATING')
-
-        new_profile_id = 'a64f0b03-4b77-49d5-89e0-7bcc77c4ce67'
-        cluster.set_status(self.context, consts.CS_ACTIVE, 'Cluster updated',
-                           profile_id=new_profile_id)
-
-        self.assertEqual(consts.CS_ACTIVE, cluster.status)
-        self.assertEqual('Cluster updated', cluster.status_reason)
-        self.assertIsNotNone(cluster.updated_at)
-        self.assertEqual(x_profile, cluster.rt['profile'])
-        self.assertEqual(new_profile_id, cluster.profile_id)
-        mock_load.assert_called_once_with(self.context,
-                                          profile_id=new_profile_id)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {
-                'status': consts.CS_ACTIVE,
-                'status_reason': 'Cluster updated',
-                'profile_id': new_profile_id,
-                'updated_at': mock.ANY,
-            }
-        )
-
-    @mock.patch.object(co.Cluster, 'update')
-    def test_set_status_without_reason(self, mock_update):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, id=CLUSTER_ID,
-                             status='UPDATING',
-                             status_reason='Update in progress')
-
-        cluster.set_status(self.context, consts.CS_WARNING)
-
-        self.assertEqual(consts.CS_WARNING, cluster.status)
-        self.assertEqual('Update in progress', cluster.status_reason)
-        mock_update.assert_called_once_with(self.context, CLUSTER_ID,
-                                            {'status': consts.CS_WARNING})
-
-    @mock.patch.object(pfb.Profile, "create_cluster_object")
-    def test_do_create(self, mock_create_cluster):
-        mock_create_cluster.return_value = None
-
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        mock_status = self.patchobject(cluster, 'set_status')
-
-        res = cluster.do_create(self.context)
-
-        self.assertTrue(res)
-        mock_status.assert_called_once_with(
-            self.context, consts.CS_CREATING, 'Creation in progress')
-
-    def test_do_create_wrong_status(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        cluster.status = consts.CS_ACTIVE
-
-        res = cluster.do_create(self.context)
-
-        self.assertFalse(res)
-
-    @mock.patch.object(pfb.Profile, "delete_cluster_object")
-    @mock.patch.object(co.Cluster, 'delete')
-    def test_do_delete(self, mock_delete, mock_delete_cluster):
-        mock_delete.return_value = None
-        mock_delete_cluster.return_value = None
-
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        cluster.id = CLUSTER_ID
-        mock_status = self.patchobject(cluster, 'set_status')
-
-        res = cluster.do_delete(self.context)
-
-        mock_delete.assert_called_once_with(self.context, CLUSTER_ID)
-        self.assertTrue(res)
-        mock_status.assert_called_once_with(
-            self.context, consts.CS_DELETING, 'Deletion in progress')
-
-    def test_do_update(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        mock_status = self.patchobject(cluster, 'set_status')
-
-        res = cluster.do_update(self.context)
-
-        mock_status.assert_called_once_with(self.context, consts.CS_UPDATING,
-                                            'Update in progress')
-        self.assertTrue(res)
-
-    def test_do_check(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        mock_status = self.patchobject(cluster, 'set_status')
-
-        res = cluster.do_check(self.context)
-
-        mock_status.assert_called_once_with(self.context, consts.CS_CHECKING,
-                                            'Check in progress')
-        self.assertTrue(res)
-
-    def test_do_recover(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        mock_status = self.patchobject(cluster, 'set_status')
-
-        res = cluster.do_recover(self.context)
-
-        mock_status.assert_called_once_with(self.context, consts.CS_RECOVERING,
-                                            'Recovery in progress')
-        self.assertTrue(res)
-
-    def test_do_operation(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        mock_status = self.patchobject(cluster, 'set_status')
-
-        res = cluster.do_operation(self.context, operation='dance')
-
-        mock_status.assert_called_once_with(self.context, consts.CS_OPERATING,
-                                            'Operation dance in progress')
-        self.assertTrue(res)
-
-    def test_nodes_property(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        self.assertEqual([], cluster.nodes)
-
-        # with nodes
-        node1 = mock.Mock()
-        node2 = mock.Mock()
-        cluster.rt['nodes'] = [node1, node2]
-
-        self.assertEqual([node1, node2], cluster.nodes)
-
-    def test_policies_property(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        self.assertEqual([], cluster.policies)
-
-        # with policies attached
-        policy1 = mock.Mock()
-        policy2 = mock.Mock()
-        cluster.rt['policies'] = [policy1, policy2]
-        self.assertEqual([policy1, policy2], cluster.policies)
-
-    def test_add_node(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        self.assertEqual([], cluster.nodes)
-
-        # add one node
-        node = mock.Mock()
-        cluster.add_node(node)
-        self.assertEqual([node], cluster.nodes)
-
-        # add another node
-        another_node = mock.Mock()
-        cluster.add_node(another_node)
-        self.assertEqual([node, another_node], cluster.nodes)
-
-    def test_remove_node(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        self.assertEqual([], cluster.nodes)
-
-        # remove from empty list should be okay
-        res = cluster.remove_node('BOGUS')
-        self.assertIsNone(res)
-
-        # add one node
-        node1 = mock.Mock()
-        node1.id = '62d52dd6-5f83-4340-b079-349da2f9ffd9'
-        cluster.add_node(node1)
-        self.assertEqual([node1], cluster.nodes)
-
-        # remove non-existent node should be okay
-        node2 = mock.Mock()
-        node2.id = 'd68214b2-e466-457f-a661-c8413a094a10'
-        res = cluster.remove_node(node2)
-        self.assertIsNone(res)
-        self.assertEqual([node1], cluster.nodes)
-
-        # add another node
-        cluster.add_node(node2)
-        self.assertEqual([node1, node2], cluster.nodes)
-
-        # remove first node
-        res = cluster.remove_node(node1.id)
-        self.assertIsNone(res)
-        self.assertEqual([node2], cluster.nodes)
-
-        # reload and remove node
-        node3 = mock.Mock()
-        node3.id = 'd68214b2-e466-457f-a661-c8413a094a10'
-
-        res = cluster.remove_node(node3.id)
-        self.assertIsNone(res)
-        self.assertEqual([], cluster.nodes)
-
-    def test_update_node(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        self.assertEqual([], cluster.nodes)
-
-        node1 = mock.Mock(id='fake', status='ACTIVE')
-        # add one
-        cluster.add_node(node1)
-
-        node1.status = 'ERROR'
-        cluster.update_node([node1])
-        self.assertEqual([node1], cluster.nodes)
-
-        # update new ones
-        node2 = mock.Mock(id='fake1', status='ACTIVE')
-        node3 = mock.Mock(id='fake2', status='ERROR')
-        cluster.update_node([node2, node3])
-        self.assertEqual([node2, node3], cluster.nodes)
-
-    @mock.patch.object(pcb.Policy, 'load')
-    @mock.patch.object(cpm, 'ClusterPolicy')
-    def test_attach_policy(self, mock_cp, mock_load):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        cluster.id = CLUSTER_ID
-
-        policy = mock.Mock()
-        policy.attach.return_value = (True, None)
-        policy.PRIORITY = 10
-        mock_load.return_value = policy
-
-        binding = mock.Mock()
-        mock_cp.return_value = binding
-
-        values = {'enabled': True}
-        cluster.attach_policy(self.context, POLICY_ID, values)
-        policy.attach.assert_called_once_with(cluster, enabled=True)
-        mock_load.assert_called_once_with(self.context, POLICY_ID)
-        mock_cp.assert_called_once_with(CLUSTER_ID, POLICY_ID, priority=10,
-                                        enabled=True, data=None)
-        binding.store.assert_called_once_with(self.context)
-        self.assertIn(policy, cluster.policies)
-
-    @mock.patch.object(pcb.Policy, 'load')
-    def test_attach_policy_already_attached(self, mock_load):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-
-        policy_id = '62d52dd6-5f83-4340-b079-349da2f9ffd9'
-        existing = mock.Mock(id=policy_id)
-        cluster.rt['policies'] = [existing]
-        policy = mock.Mock()
-        mock_load.return_value = policy
-
-        # do it
-        res, reason = cluster.attach_policy(self.context, policy_id, {})
-
-        self.assertTrue(res)
-        self.assertEqual('Policy already attached.', reason)
-        mock_load.assert_called_once_with(self.context, policy_id)
-
-    @mock.patch.object(pcb.Policy, 'load')
-    def test_attach_policy_type_conflict(self, mock_load):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        cluster.id = CLUSTER_ID
-
-        existing = mock.Mock()
-        existing.id = POLICY_ID
-        existing.type = 'POLICY_TYPE_ONE'
-        cluster.rt['policies'] = [existing]
-
-        policy = mock.Mock()
-        policy.singleton = True
-        policy.type = 'POLICY_TYPE_ONE'
-        mock_load.return_value = policy
-
-        # do it
-        new_policy_id = '62d52dd6-5f83-4340-b079-349da2f9ffd9'
-        res, reason = cluster.attach_policy(self.context, new_policy_id, {})
-
-        # assert
-        self.assertFalse(res)
-        expected = ('Only one instance of policy type (POLICY_TYPE_ONE) can '
-                    'be attached to a cluster, but another instance '
-                    '(%s) is found attached to the cluster '
-                    '(%s) already.' % (POLICY_ID, CLUSTER_ID))
-        self.assertEqual(expected, reason)
-        mock_load.assert_called_once_with(self.context, new_policy_id)
-
-    @mock.patch.object(cpm, 'ClusterPolicy')
-    @mock.patch.object(pcb.Policy, 'load')
-    def test_attach_policy_type_conflict_but_ok(self, mock_load, mock_cp):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-
-        existing = mock.Mock()
-        existing.id = POLICY_ID
-        existing.type = 'POLICY_TYPE_ONE'
-        cluster.rt['policies'] = [existing]
-
-        policy = mock.Mock()
-        policy.singleton = False
-        policy.type = 'POLICY_TYPE_ONE'
-        policy.attach.return_value = (True, None)
-        policy.PRIORITY = 10
-        mock_load.return_value = policy
-
-        binding = mock.Mock()
-        mock_cp.return_value = binding
-
-        values = {'enabled': True}
-
-        # do it
-        new_policy_id = '62d52dd6-5f83-4340-b079-349da2f9ffd9'
-        res, reason = cluster.attach_policy(self.context, new_policy_id,
-                                            values)
-
-        # assert
-        self.assertTrue(res)
-        self.assertEqual('Policy attached.', reason)
-
-        policy.attach.assert_called_once_with(cluster, enabled=True)
-        mock_load.assert_called_once_with(self.context, new_policy_id)
-        mock_cp.assert_called_once_with(cluster.id, new_policy_id, priority=10,
-                                        enabled=True, data=None)
-        binding.store.assert_called_once_with(self.context)
-        self.assertIn(policy, cluster.policies)
-
-    @mock.patch.object(pcb.Policy, 'load')
-    def test_attach_policy_failed_do_attach(self, mock_load):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-
-        policy = mock.Mock()
-        policy.attach.return_value = (False, 'Bad things happened.')
-        mock_load.return_value = policy
-
-        # do it
-        new_id = '62d52dd6-5f83-4340-b079-349da2f9ffd9'
-        res, reason = cluster.attach_policy(self.context, new_id, {})
-
-        self.assertFalse(res)
-        self.assertEqual('Bad things happened.', reason)
-        policy.attach.assert_called_once_with(cluster, enabled=True)
-        mock_load.assert_called_once_with(self.context, new_id)
-
-    @mock.patch.object(cpo.ClusterPolicy, 'delete')
-    @mock.patch.object(pcb.Policy, 'load')
-    def test_detach_policy(self, mock_load, mock_detach):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        cluster.id = CLUSTER_ID
-
-        policy = mock.Mock()
-        policy.id = POLICY_ID
-        existing = mock.Mock()
-        existing.id = POLICY_ID
-        cluster.rt['policies'] = [existing]
-        policy.detach.return_value = (True, None)
-        mock_load.return_value = policy
-
-        res, reason = cluster.detach_policy(self.context, POLICY_ID)
-
-        self.assertTrue(res)
-        self.assertEqual('Policy detached.', reason)
-        policy.detach.assert_called_once_with(cluster)
-        mock_load.assert_called_once_with(self.context, POLICY_ID)
-        mock_detach.assert_called_once_with(self.context, CLUSTER_ID,
-                                            POLICY_ID)
-        self.assertEqual([], cluster.rt['policies'])
-
-    def test_detach_policy_not_attached(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        cluster.rt['policies'] = []
-
-        res, reason = cluster.detach_policy(self.context, POLICY_ID)
-
-        self.assertFalse(res)
-        self.assertEqual('Policy not attached.', reason)
-
-    @mock.patch.object(pcb.Policy, 'load')
-    def test_detach_policy_failed_detach(self, mock_load):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        policy = mock.Mock()
-        policy.id = POLICY_ID
-        policy.detach.return_value = False, 'Things went wrong.'
-        mock_load.return_value = policy
-        cluster.rt['policies'] = [policy]
-
-        res, reason = cluster.detach_policy(self.context, POLICY_ID)
-
-        self.assertFalse(res)
-        self.assertEqual('Things went wrong.', reason)
-        mock_load.assert_called_once_with(self.context, POLICY_ID)
-        policy.detach.assert_called_once_with(cluster)
-
-    @mock.patch.object(cpo.ClusterPolicy, 'update')
-    def test_update_policy(self, mock_update):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        cluster.id = CLUSTER_ID
-
-        existing = mock.Mock()
-        existing.id = POLICY_ID
-        existing.type = "senlin.policy.foo"
-        cluster.rt['policies'] = [existing]
-        values = {
-            'enabled': False
-        }
-
-        res, reason = cluster.update_policy(self.context, POLICY_ID, **values)
-
-        self.assertTrue(res)
-        self.assertEqual('Policy updated.', reason)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID, POLICY_ID, {'enabled': False})
-
-    def test_update_policy_not_attached(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        cluster.rt['policies'] = []
-        values = {'enabled': False}
-
-        # do it
-        res, reason = cluster.update_policy(self.context, POLICY_ID, **values)
-
-        self.assertFalse(res)
-        self.assertEqual('Policy not attached.', reason)
-
-    def test_update_policy_no_update_needed(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        existing = mock.Mock()
-        existing.id = POLICY_ID
-        cluster.rt['policies'] = [existing]
-        values = {}
-
-        # do it
-        res, reason = cluster.update_policy(self.context, POLICY_ID, **values)
-
-        self.assertTrue(res)
-        self.assertEqual('No update is needed.', reason)
-
-    @mock.patch.object(cpo.ClusterPolicy, "update")
-    @mock.patch.object(health_manager, "enable")
-    def test_update_policy_enable_health(self, mock_enable, mock_update):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, id=CLUSTER_ID)
-        existing = mock.Mock(id=POLICY_ID, type="senlin.policy.health")
-        cluster.rt['policies'] = [existing]
-        values = {"enabled": True}
-
-        # do it
-        res, reason = cluster.update_policy(self.context, POLICY_ID, **values)
-
-        self.assertTrue(res)
-        mock_enable.assert_called_once_with(CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID, POLICY_ID, {'enabled': True})
-
-    @mock.patch.object(cpo.ClusterPolicy, "update")
-    @mock.patch.object(health_manager, "disable")
-    def test_update_policy_disable_health(self, mock_disable, mock_update):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID, id=CLUSTER_ID)
-        existing = mock.Mock(id=POLICY_ID, type="senlin.policy.health")
-        cluster.rt['policies'] = [existing]
-        values = {"enabled": False}
-
-        # do it
-        res, reason = cluster.update_policy(self.context, POLICY_ID, **values)
-
-        self.assertTrue(res)
-        mock_disable.assert_called_once_with(CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID, POLICY_ID, {'enabled': False})
-
-    def test_get_region_distribution(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-
-        node1 = mock.Mock()
-        node1.data = {'placement': {'region_name': 'R1'}}
-        node2 = mock.Mock()
-        node2.data = {'placement': {'region_name': 'R2'}}
-        node3 = mock.Mock()
-        node3.data = {'key': 'value'}
-
-        node4 = mock.Mock()
-        node4.data = {'placement': {'region_name': 'BAD'}}
-
-        nodes = [node1, node2, node3, node4]
-        for n in nodes:
-            cluster.add_node(n)
-
-        result = cluster.get_region_distribution(['R1', 'R2', 'R3'])
-
-        self.assertEqual(3, len(result))
-        self.assertEqual(1, result['R1'])
-        self.assertEqual(1, result['R2'])
-        self.assertEqual(0, result['R3'])
-
-    def test_get_zone_distribution(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        node1 = mock.Mock()
-        node1.data = {}
-        node1.get_details.return_value = {
-            'OS-EXT-AZ:availability_zone': 'AZ1',
-        }
-        node2 = mock.Mock()
-        node2.data = {
-            'foobar': 'irrelevant'
-        }
-        node3 = mock.Mock()
-        node3.data = {
-            'placement': {
-                'zone': 'AZ2'
-            }
-        }
-
-        nodes = [node1, node2, node3]
-        for n in nodes:
-            cluster.add_node(n)
-
-        result = cluster.get_zone_distribution(self.context,
-                                               ['AZ1', 'AZ2', 'AZ3'])
-
-        self.assertEqual(3, len(result))
-        self.assertEqual(1, result['AZ1'])
-        self.assertEqual(1, result['AZ2'])
-        self.assertEqual(0, result['AZ3'])
-
-        node1.get_details.assert_called_once_with(self.context)
-
-    def test_nodes_by_region(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        node1 = mock.Mock(data={'placement': {'region_name': 'R1'}})
-        node2 = mock.Mock(data={'placement': {'region_name': 'R2'}})
-        node3 = mock.Mock(data={'key': 'value'})
-        node4 = mock.Mock(data={'placement': {'region_name': 'BAD'}})
-
-        nodes = [node1, node2, node3, node4]
-        for n in nodes:
-            cluster.add_node(n)
-
-        result = cluster.nodes_by_region('R1')
-        self.assertEqual(1, len(result))
-        self.assertEqual(node1, result[0])
-
-        result = cluster.nodes_by_region('R2')
-        self.assertEqual(1, len(result))
-        self.assertEqual(node2, result[0])
-
-        result = cluster.nodes_by_region('R3')
-        self.assertEqual(0, len(result))
-
-    def test_nodes_by_zone(self):
-        cluster = cm.Cluster('test-cluster', 0, PROFILE_ID)
-        node1 = mock.Mock(data={'placement': {'zone': 'AZ1'}})
-        node2 = mock.Mock(data={'placement': {'zone': 'AZ2'}})
-        node3 = mock.Mock(data={'key': 'value'})
-        node4 = mock.Mock(data={'placement': {'zone': 'BAD'}})
-
-        nodes = [node1, node2, node3, node4]
-        for n in nodes:
-            cluster.add_node(n)
-
-        result = cluster.nodes_by_zone('AZ1')
-        self.assertEqual(1, len(result))
-        self.assertEqual(node1, result[0])
-
-        result = cluster.nodes_by_zone('AZ2')
-        self.assertEqual(1, len(result))
-        self.assertEqual(node2, result[0])
-
-        result = cluster.nodes_by_region('AZ3')
-        self.assertEqual(0, len(result))
-
-    @mock.patch.object(node_mod.Node, 'load_all')
-    @mock.patch.object(node_mod.Node, 'do_check')
-    @mock.patch.object(cm.Cluster, 'update_node')
-    def test_health_check(self, mock_update, mock_check, mock_load):
-        cluster = cm.Cluster('test-cluster', 5, PROFILE_ID,
-                             min_size=2, id=CLUSTER_ID)
-        node1 = node_mod.Node('fake1', PROFILE_ID, status='ACTIVE')
-        node2 = node_mod.Node('fake2', PROFILE_ID, status='ACTIVE')
-        nodes = [node1, node2]
-        for node in nodes:
-            cluster.add_node(node)
-
-        node1.status = 'ERROR'
-        mock_load.return_value = [node1, node2]
-
-        cluster.health_check(self.context)
-
-        self.assertEqual(2, len(cluster.nodes))
-        self.assertEqual([node1, node2], cluster.nodes)
-
-        mock_update.assert_called_once_with([node1, node2])
-        mock_check.assert_has_calls([
-            mock.call(self.context),
-            mock.call(self.context)
-        ])
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status_below_min_size(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 5, PROFILE_ID,
-                             min_size=2, id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ERROR')
-        node3 = mock.Mock(status='WARNING')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST')
-        rt = cluster.rt
-        self.assertEqual(3, len(rt['nodes']))
-        self.assertIsInstance(rt['nodes'], list)
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'status': consts.CS_ERROR,
-             'status_reason': 'TEST: number of active nodes is below '
-                              'min_size (2).'})
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status_below_desired_capacity(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 5, PROFILE_ID,
-                             min_size=1, id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ERROR')
-        node3 = mock.Mock(status='WARNING')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST')
-
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'status': consts.CS_WARNING,
-             'status_reason': 'TEST: number of active nodes is below '
-                              'desired_capacity (5).'})
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status_equal_desired_capacity(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 3, PROFILE_ID,
-                             min_size=1, id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ACTIVE')
-        node3 = mock.Mock(status='ACTIVE')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST')
-
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'status': consts.CS_ACTIVE,
-             'status_reason': 'TEST: number of active nodes is equal or above '
-                              'desired_capacity (3).'})
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status_above_desired_capacity(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 2, PROFILE_ID,
-                             min_size=1, id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ACTIVE')
-        node3 = mock.Mock(status='ACTIVE')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST')
-
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'status': consts.CS_ACTIVE,
-             'status_reason': 'TEST: number of active nodes is equal or above '
-                              'desired_capacity (2).'})
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status_above_max_size(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 2, PROFILE_ID,
-                             max_size=2, id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ACTIVE')
-        node3 = mock.Mock(status='ACTIVE')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST')
-
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'status': consts.CS_WARNING,
-             'status_reason': 'TEST: number of active nodes is above '
-                              'max_size (2).'})
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status_with_new_desired(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 5, PROFILE_ID, id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ERROR')
-        node3 = mock.Mock(status='WARNING')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST', desired_capacity=2)
-
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'desired_capacity': 2,
-             'status': consts.CS_WARNING,
-             'status_reason': 'TEST: number of active nodes is below '
-                              'desired_capacity (2).'})
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status__new_desired_is_zero(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 5, PROFILE_ID, id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ERROR')
-        node3 = mock.Mock(status='WARNING')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST', desired_capacity=0)
-
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'desired_capacity': 0,
-             'status': consts.CS_ACTIVE,
-             'status_reason': 'TEST: number of active nodes is equal or above '
-                              'desired_capacity (0).'})
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status_with_new_min(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 5, PROFILE_ID,
-                             id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ERROR')
-        node3 = mock.Mock(status='WARNING')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST', min_size=2)
-
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'min_size': 2,
-             'status': consts.CS_ERROR,
-             'status_reason': 'TEST: number of active nodes is below '
-                              'min_size (2).'})
-
-    @mock.patch.object(co.Cluster, 'update')
-    @mock.patch.object(node_mod.Node, 'load_all')
-    def test_eval_status_with_new_max(self, mock_load, mock_update):
-        cluster = cm.Cluster('test-cluster', 2, PROFILE_ID,
-                             max_size=5, id=CLUSTER_ID)
-        node1 = mock.Mock(status='ACTIVE')
-        node2 = mock.Mock(status='ACTIVE')
-        node3 = mock.Mock(status='ACTIVE')
-        mock_load.return_value = [node1, node2, node3]
-
-        cluster.eval_status(self.context, 'TEST', max_size=6)
-
-        mock_load.assert_called_once_with(self.context, cluster_id=CLUSTER_ID)
-        mock_update.assert_called_once_with(
-            self.context, CLUSTER_ID,
-            {'max_size': 6,
-             'status': consts.CS_ACTIVE,
-             'status_reason': 'TEST: number of active nodes is equal or above '
-                              'desired_capacity (2).'})
diff --git a/senlin/tests/unit/engine/test_cluster_policy.py b/senlin/tests/unit/engine/test_cluster_policy.py
deleted file mode 100644
index 1ea077dbc..000000000
--- a/senlin/tests/unit/engine/test_cluster_policy.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_utils import timeutils
-
-from senlin.common import exception
-from senlin.common import utils as common_utils
-from senlin.engine import cluster_policy as cpm
-from senlin.objects import cluster_policy as cpo
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-CLUSTER_ID = '8d674833-6c0c-4e1c-928b-4bb3a4ebd4ae'
-POLICY_ID = 'fa573870-fe44-42aa-84a9-08462f0e6999'
-PROFILE_ID = '12abef70-ab31-484a-92aa-02388f0e6ccc'
-
-
-class TestClusterPolicy(base.SenlinTestCase):
-
-    def setUp(self):
-        super(TestClusterPolicy, self).setUp()
-        self.context = utils.dummy_context()
-
-    def test_cluster_policy_init(self):
-        values = {
-            'priority': 12,
-            'enabled': True,
-        }
-        cp = cpm.ClusterPolicy(CLUSTER_ID, POLICY_ID, **values)
-
-        self.assertIsNone(cp.id)
-        self.assertEqual(CLUSTER_ID, cp.cluster_id)
-        self.assertEqual(POLICY_ID, cp.policy_id)
-        self.assertEqual(12, cp.priority)
-        self.assertTrue(cp.enabled)
-        self.assertEqual({}, cp.data)
-        self.assertIsNone(cp.last_op)
-        self.assertEqual('', cp.cluster_name)
-        self.assertEqual('', cp.policy_type)
-        self.assertEqual('', cp.policy_name)
-
-    def test_cluster_policy_store(self):
-        utils.create_profile(self.context, PROFILE_ID)
-        cluster = utils.create_cluster(self.context, CLUSTER_ID, PROFILE_ID)
-        policy = utils.create_policy(self.context, POLICY_ID)
-        values = {
-            'priority': 12,
-            'enabled': True,
-        }
-        cp = cpm.ClusterPolicy(cluster.id, policy.id, **values)
-        self.assertIsNone(cp.id)
-        cp_id = cp.store(self.context)
-        self.assertIsNotNone(cp_id)
-
-        result = cpo.ClusterPolicy.get(self.context, CLUSTER_ID, POLICY_ID)
-
-        self.assertIsNotNone(result)
-        self.assertEqual(12, result.priority)
-        self.assertTrue(result.enabled)
-        self.assertEqual({}, result.data)
-        self.assertIsNone(result.last_op)
-
-        # do an update
-        cp.enabled = False
-        cp.priority = 60
-        cp.data = {'foo': 'bar'}
-        timestamp = timeutils.utcnow(True)
-        cp.last_op = timestamp
-
-        new_id = cp.store(self.context)
-        self.assertEqual(cp_id, new_id)
-
-        result = cpo.ClusterPolicy.get(self.context, CLUSTER_ID, POLICY_ID)
-
-        self.assertIsNotNone(result)
-        self.assertFalse(result.enabled)
-        self.assertEqual(60, result.priority)
-        self.assertEqual({'foo': 'bar'}, result.data)
-        self.assertEqual(common_utils.isotime(timestamp),
-                         common_utils.isotime(result.last_op))
-
-    def test_cluster_policy_load(self):
-        ex = self.assertRaises(exception.PolicyNotAttached,
-                               cpm.ClusterPolicy.load,
-                               self.context, 'some-cluster', 'any-policy')
-        self.assertEqual("The policy 'any-policy' is not attached to the "
-                         "specified cluster 'some-cluster'.",
-                         str(ex))
-
-        utils.create_profile(self.context, PROFILE_ID)
-        cluster = utils.create_cluster(self.context, CLUSTER_ID, PROFILE_ID)
-        policy = utils.create_policy(self.context, POLICY_ID)
-
-        values = {
-            'priority': 12,
-            'enabled': True,
-        }
-        cp = cpm.ClusterPolicy(cluster.id, policy.id, **values)
-        cp_id = cp.store(self.context)
-
-        result = cpm.ClusterPolicy.load(self.context, CLUSTER_ID, POLICY_ID)
-
-        self.assertEqual(cp_id, result.id)
-        self.assertEqual(cluster.id, result.cluster_id)
-        self.assertEqual(policy.id, result.policy_id)
-        self.assertTrue(result.enabled)
-        self.assertEqual(12, result.priority)
-        self.assertEqual({}, result.data)
-        self.assertIsNone(result.last_op)
-        self.assertEqual('test-cluster', result.cluster_name)
-        self.assertEqual('senlin.policy.dummy-1.0', result.policy_type)
-        self.assertEqual('test_policy', result.policy_name)
-
-    def test_cluster_policy_to_dict(self):
-        values = {
-            'priority': 12,
-            'enabled': True,
-        }
-        cp = cpm.ClusterPolicy(CLUSTER_ID, POLICY_ID, **values)
-        self.assertIsNone(cp.id)
-        expected = {
-            'id': None,
-            'cluster_id': CLUSTER_ID,
-            'policy_id': POLICY_ID,
-            'enabled': True,
-            'data': {},
-            'last_op': None,
-            'cluster_name': '',
-            'policy_type': '',
-            'policy_name': '',
-        }
-
-        self.assertEqual(expected, cp.to_dict())
diff --git a/senlin/tests/unit/engine/test_engine_parser.py b/senlin/tests/unit/engine/test_engine_parser.py
deleted file mode 100644
index 920af58ca..000000000
--- a/senlin/tests/unit/engine/test_engine_parser.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import io
-import os
-from unittest import mock
-import urllib
-
-
-from senlin.engine import parser
-from senlin.tests.unit.common import base
-
-
-class ParserTest(base.SenlinTestCase):
-
-    json_template = """
-    {
-      "type": "os.heat.stack",
-      "version": 1.0,
-      "properties": {
-        "name": "random_string_stack",
-        "template": "random_string_stack.yaml"
-      }
-    }
-    """
-
-    yaml_template = """
-    type: os.heat.stack
-    version: 1.0
-    properties:
-      name: random_string_stack
-      template: random_string_stack.yaml
-    """
-
-    expect_result = {
-        "type": "os.heat.stack",
-        "version": 1,
-        "properties": {
-            "name": "random_string_stack",
-            "template": "random_string_stack.yaml"
-        }
-    }
-
-    def test_parse_json_success(self):
-        result = parser.simple_parse(self.json_template)
-        self.assertEqual(self.expect_result, result)
-
-    def test_parse_yaml_success(self):
-        result = parser.simple_parse(self.yaml_template)
-        self.assertEqual(self.expect_result, result)
-
-    def test_parse_string(self):
-        tmpl_str = 'json string'
-        ex = self.assertRaises(ValueError,
-                               parser.simple_parse,
-                               tmpl_str)
-        self.assertEqual('The input is not a JSON object or YAML mapping.',
-                         str(ex))
-
-    def test_parse_list(self):
-        tmpl_str = '["foo" , "bar"]'
-        ex = self.assertRaises(ValueError,
-                               parser.simple_parse,
-                               tmpl_str)
-        self.assertEqual('The input is not a JSON object or YAML mapping.',
-                         str(ex))
-
-    def test_parse_invalid_yaml_and_json_template(self):
-        tmpl_str = '{test'
-        ex = self.assertRaises(ValueError,
-                               parser.simple_parse,
-                               tmpl_str)
-        self.assertIn('Error parsing input:',
-                      str(ex))
-
-
-class ParseTemplateIncludeFiles(base.SenlinTestCase):
-    scenarios = [
-        ('include_from_file_without_path', dict(
-            tmpl_str='foo: !include a.file',
-            url_path='file:///tmp/a.file',
-        )),
-        ('include_from_file_with_path', dict(
-            tmpl_str='foo: !include file:///tmp/a.file',
-            url_path='file:///tmp/a.file',
-        )),
-        ('include_from_http', dict(
-            tmpl_str='foo: !include http://tmp/a.file',
-            url_path='http://tmp/a.file',
-        )),
-        ('include_from_https', dict(
-            tmpl_str='foo: !include https://tmp/a.file',
-            url_path='https://tmp/a.file',
-        ))
-
-    ]
-
-    @mock.patch.object(urllib.request, 'urlopen')
-    @mock.patch.object(os.path, 'abspath')
-    def test_parse_template(self, mock_abspath, mock_urlopen):
-        fetch_data = 'bar'
-        expect_result = {
-            'foo': 'bar'
-        }
-
-        mock_abspath.return_value = '/tmp/a.file'
-        mock_urlopen.side_effect = [
-            urllib.error.URLError('oops'),
-            io.StringIO(fetch_data)
-        ]
-
-        ex = self.assertRaises(
-            IOError,
-            parser.simple_parse, self.tmpl_str
-        )
-        self.assertIn('Failed retrieving file %s:' % self.url_path,
-                      str(ex))
-        result = parser.simple_parse(self.tmpl_str)
-        self.assertEqual(expect_result,
-                         result)
-        mock_urlopen.assert_has_calls([
-            mock.call(self.url_path),
-            mock.call(self.url_path)
-        ])
diff --git a/senlin/tests/unit/engine/test_environment.py b/senlin/tests/unit/engine/test_environment.py
deleted file mode 100644
index 31fc5b222..000000000
--- a/senlin/tests/unit/engine/test_environment.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import glob
-from unittest import mock
-
-from senlin.common import exception
-from senlin.engine import environment
-from senlin.tests.unit.common import base
-
-fake_env_str = """
-parameters:
-  pa: va
-  pb: vb
-custom_profiles:
-  prof_1: plugin_1
-custom_policies:
-  policy_2: plugin_2
-"""
-
-
-class TestEnvironment(base.SenlinTestCase):
-
-    def test_create_global(self):
-        e = environment.Environment(is_global=True)
-
-        self.assertEqual({}, e.params)
-        self.assertEqual('profiles', e.profile_registry.registry_name)
-        self.assertEqual('policies', e.policy_registry.registry_name)
-        self.assertEqual('drivers', e.driver_registry.registry_name)
-        self.assertEqual('endpoints', e.endpoint_registry.registry_name)
-        self.assertTrue(e.profile_registry.is_global)
-        self.assertTrue(e.policy_registry.is_global)
-        self.assertTrue(e.driver_registry.is_global)
-        self.assertTrue(e.endpoint_registry.is_global)
-
-    def test_create_default(self):
-        ge = environment.global_env()
-        e = environment.Environment()
-
-        reg_prof = e.profile_registry
-        reg_plcy = e.policy_registry
-        reg_driv = e.driver_registry
-        reg_endp = e.endpoint_registry
-
-        self.assertEqual({}, e.params)
-        self.assertEqual('profiles', reg_prof.registry_name)
-        self.assertEqual('policies', reg_plcy.registry_name)
-        self.assertEqual('drivers', reg_driv.registry_name)
-        self.assertEqual('endpoints', reg_endp.registry_name)
-        self.assertFalse(reg_prof.is_global)
-        self.assertFalse(reg_plcy.is_global)
-        self.assertFalse(reg_driv.is_global)
-        self.assertFalse(reg_endp.is_global)
-        self.assertEqual('profiles', ge.profile_registry.registry_name)
-        self.assertEqual('policies', ge.policy_registry.registry_name)
-        self.assertEqual('drivers', ge.driver_registry.registry_name)
-        self.assertEqual('endpoints', ge.endpoint_registry.registry_name)
-        self.assertEqual(ge.profile_registry, reg_prof.global_registry)
-        self.assertEqual(ge.policy_registry, reg_plcy.global_registry)
-        self.assertEqual(ge.driver_registry, reg_driv.global_registry)
-        self.assertEqual(ge.endpoint_registry, reg_endp.global_registry)
-
-    def test_create_with_env(self):
-        env = {
-            'parameters': {
-                'p1': 'v1',
-                'p2': True,
-            },
-            'custom_profiles': {
-                'PROFILE_FOO': 'some.class',
-                'PROFILE_BAR': 'other.class',
-            },
-            'custom_policies': {
-                'POLICY_Alpha': 'package.alpha',
-                'POLICY_Beta': 'package.beta',
-            },
-        }
-
-        e = environment.Environment(env=env, is_global=True)
-
-        self.assertEqual('v1', e.params['p1'])
-        self.assertTrue(e.params['p2'])
-        self.assertEqual('some.class', e.get_profile('PROFILE_FOO'))
-        self.assertEqual('other.class', e.get_profile('PROFILE_BAR'))
-        self.assertEqual('package.alpha', e.get_policy('POLICY_Alpha'))
-        self.assertEqual('package.beta', e.get_policy('POLICY_Beta'))
-
-    def test_parse(self):
-        env = environment.Environment()
-        result = env.parse(fake_env_str)
-
-        self.assertEqual('va', result['parameters']['pa'])
-        self.assertEqual('vb', result['parameters']['pb'])
-        self.assertEqual('plugin_1', result['custom_profiles']['prof_1'])
-        self.assertEqual('plugin_2', result['custom_policies']['policy_2'])
-
-        # unknown sections
-        env_str = "variables:\n p1: v1"
-        err = self.assertRaises(ValueError, env.parse, env_str)
-        self.assertEqual('environment has unknown section "variables"',
-                         str(err))
-
-        # omitted sections
-        env_str = "parameters:\n p1: v1"
-        result = env.parse(env_str)
-        self.assertEqual('v1', result['parameters']['p1'])
-        self.assertEqual({}, result['custom_profiles'])
-        self.assertEqual({}, result['custom_policies'])
-
-    def test_parse_empty(self):
-        env = environment.Environment()
-        result = env.parse(None)
-        self.assertEqual({}, result)
-
-    def test_load(self):
-        env = environment.Environment()
-        env.load({})
-        self.assertEqual({}, env.params)
-        self.assertEqual({}, env.profile_registry._registry)
-        self.assertEqual({}, env.policy_registry._registry)
-        self.assertEqual({}, env.driver_registry._registry)
-
-        env_dict = {
-            'parameters': {
-                'P': 'V'
-            },
-            'custom_profiles': {
-                'C1': 'class1',
-            },
-            'custom_policies': {
-                'C2': 'class2',
-            },
-        }
-        env.load(env_dict)
-        self.assertEqual('V', env.params['P'])
-        self.assertEqual('class1', env.get_profile('C1'))
-        self.assertEqual('class2', env.get_policy('C2'))
-
-    def test_check_plugin_name(self):
-        env = environment.Environment()
-
-        for pt in ['Profile', 'Policy', 'Driver', 'Endpoint']:
-            res = env._check_plugin_name(pt, 'abc')
-            self.assertIsNone(res)
-
-            ex = self.assertRaises(exception.InvalidPlugin,
-                                   env._check_plugin_name, pt, '')
-            self.assertEqual('%s type name not specified' % pt,
-                             str(ex))
-
-            ex = self.assertRaises(exception.InvalidPlugin,
-                                   env._check_plugin_name, pt, None)
-            self.assertEqual('%s type name not specified' % pt,
-                             str(ex))
-
-            for v in [123, {}, ['a'], ('b', 'c'), True]:
-                ex = self.assertRaises(exception.InvalidPlugin,
-                                       env._check_plugin_name, pt, v)
-                self.assertEqual('%s type name is not a string' % pt,
-                                 str(ex))
-
-    def test_register_and_get_profile(self):
-        plugin = mock.Mock()
-        env = environment.Environment()
-
-        ex = self.assertRaises(exception.ResourceNotFound,
-                               env.get_profile, 'foo')
-        self.assertEqual("The profile_type 'foo' could not be found.",
-                         str(ex))
-
-        env.register_profile('foo', plugin)
-        self.assertEqual(plugin, env.get_profile('foo'))
-
-    def test_get_profile_types(self):
-        env = environment.Environment()
-        plugin1 = mock.Mock(VERSIONS={'1.0': 'v'})
-        env.register_profile('foo-1.0', plugin1)
-        plugin2 = mock.Mock(VERSIONS={'1.2': 'v1'})
-        env.register_profile('bar-1.2', plugin2)
-
-        actual = env.get_profile_types()
-        self.assertIn(
-            {'name': 'foo', 'version': '1.0', 'support_status': {'1.0': 'v'}},
-            actual)
-        self.assertIn(
-            {'name': 'bar', 'version': '1.2', 'support_status': {'1.2': 'v1'}},
-            actual)
-
-    def test_register_and_get_policy(self):
-        plugin = mock.Mock()
-        env = environment.Environment()
-
-        ex = self.assertRaises(exception.ResourceNotFound,
-                               env.get_policy, 'foo')
-        self.assertEqual("The policy_type 'foo' could not be found.",
-                         str(ex))
-
-        env.register_policy('foo', plugin)
-        self.assertEqual(plugin, env.get_policy('foo'))
-
-    def test_get_policy_types(self):
-        env = environment.Environment()
-        plugin1 = mock.Mock(VERSIONS={'0.1': 'v'})
-        env.register_policy('foo-0.1', plugin1)
-        plugin2 = mock.Mock(VERSIONS={'0.1': 'v1'})
-        env.register_policy('bar-0.1', plugin2)
-
-        actual = env.get_policy_types()
-        self.assertIn(
-            {'name': 'foo', 'version': '0.1', 'support_status': {'0.1': 'v'}},
-            actual)
-        self.assertIn(
-            {'name': 'bar', 'version': '0.1', 'support_status': {'0.1': 'v1'}},
-            actual)
-
-    def test_register_and_get_driver_types(self):
-        plugin = mock.Mock()
-        env = environment.Environment()
-
-        ex = self.assertRaises(exception.InvalidPlugin,
-                               env.get_driver, 'foo')
-        self.assertEqual('Driver plugin foo is not found.',
-                         str(ex))
-
-        env.register_driver('foo', plugin)
-        self.assertEqual(plugin, env.get_driver('foo'))
-
-    def test_get_driver_types(self):
-        env = environment.Environment()
-        plugin1 = mock.Mock(VERSIONS={})
-        env.register_driver('foo', plugin1)
-        plugin2 = mock.Mock(VERSIONS={})
-        env.register_driver('bar', plugin2)
-
-        actual = env.get_driver_types()
-        self.assertIn(
-            {'name': 'foo', 'version': '', 'support_status': {'': ''}},
-            actual)
-        self.assertIn(
-            {'name': 'bar', 'version': '', 'support_status': {'': ''}},
-            actual)
-
-    def test_register_and_get_endpoints(self):
-        plugin = mock.Mock()
-        env = environment.Environment()
-
-        ex = self.assertRaises(exception.InvalidPlugin,
-                               env.get_endpoint, 'foo')
-        self.assertEqual('Endpoint plugin foo is not found.',
-                         str(ex))
-
-        env.register_endpoint('foo', plugin)
-        self.assertEqual(plugin, env.get_endpoint('foo'))
-
-    def test_read_global_environment(self):
-        mock_dir = self.patchobject(glob, 'glob')
-        mock_dir.return_value = ['/etc/senlin/environments/e.yaml']
-        env_dir = '/etc/senlin/environments'
-        env_contents = 'parameters:\n p1: v1'
-        env = environment.Environment(is_global=True)
-        with mock.patch('senlin.engine.environment.open',
-                        mock.mock_open(read_data=env_contents),
-                        create=True) as mock_open:
-            env.read_global_environment()
-
-        mock_dir.assert_called_with(env_dir + '/*')
-        mock_open.assert_called_with('%s/e.yaml' % env_dir)
-
-    def test_empty_environment_dir(self):
-        mock_dir = self.patchobject(glob, 'glob', return_value=[])
-        env_dir = '/etc/senlin/environments'
-        env = environment.Environment()
-        env.read_global_environment()
-
-        mock_dir.assert_called_once_with(env_dir + '/*')
-
-    def test_read_global_environment_oserror(self):
-        mock_dir = self.patchobject(glob, 'glob')
-        mock_dir.side_effect = OSError
-
-        env = environment.Environment(is_global=True)
-        env_dir = '/etc/senlin/environments'
-        env.read_global_environment()
-
-        mock_dir.assert_called_once_with(env_dir + '/*')
-
-    def test_read_global_environment_ioerror(self):
-        mock_dir = self.patchobject(glob, 'glob')
-        mock_dir.return_value = ['/etc/senlin/environments/e.yaml']
-        env_dir = '/etc/senlin/environments'
-        env = environment.Environment(is_global=True)
-        env_contents = ''
-
-        with mock.patch('senlin.engine.environment.open',
-                        mock.mock_open(read_data=env_contents),
-                        create=True) as mock_open:
-            mock_open.side_effect = IOError
-            env.read_global_environment()
-
-        mock_dir.assert_called_once_with(env_dir + '/*')
-        mock_open.assert_called_once_with('%s/e.yaml' % env_dir)
-
-    def test_read_global_environment_parse_error(self):
-        mock_dir = self.patchobject(glob, 'glob')
-        mock_dir.return_value = ['/etc/senlin/environments/e.yaml']
-        env_dir = '/etc/senlin/environments'
-        env_contents = 'aii$%@@$#7'
-        env = environment.Environment(is_global=True)
-
-        with mock.patch('senlin.engine.environment.open',
-                        mock.mock_open(read_data=env_contents),
-                        create=True) as mock_open:
-            env.read_global_environment()
-
-        mock_dir.assert_called_once_with(env_dir + '/*')
-        mock_open.assert_called_once_with('%s/e.yaml' % env_dir)
-
-    @mock.patch.object(environment, '_get_mapping')
-    def test_global_initialize(self, mock_mapping):
-        mock_mapping.return_value = [['aaa', mock.Mock()]]
-
-        environment._environment = None
-        environment.initialize()
-
-        expected = [mock.call('senlin.profiles'),
-                    mock.call('senlin.policies'),
-                    mock.call('senlin.drivers'),
-                    mock.call('senlin.endpoints')]
-
-        self.assertIsNotNone(environment._environment)
-        self.assertEqual(expected, mock_mapping.call_args_list)
-        self.assertIsNotNone(environment.global_env().get_profile('aaa'))
-        self.assertIsNotNone(environment.global_env().get_policy('aaa'))
-        self.assertIsNotNone(environment.global_env().get_driver('aaa'))
-        self.assertIsNotNone(environment.global_env().get_endpoint('aaa'))
-        environment._environment = None
diff --git a/senlin/tests/unit/engine/test_event.py b/senlin/tests/unit/engine/test_event.py
deleted file mode 100644
index e2480dbea..000000000
--- a/senlin/tests/unit/engine/test_event.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import testtools
-
-from senlin.common import consts
-from senlin.engine import event
-
-
-class TestEvent(testtools.TestCase):
-
-    def setUp(self):
-        super(TestEvent, self).setUp()
-        logging.register_options(cfg.CONF)
-
-    @mock.patch('stevedore.named.NamedExtensionManager')
-    def test_load_dispatcher(self, mock_mgr):
-
-        class FakeDispatcher(object):
-            values = {'a': 1, 'b': 2}
-
-            def __iter__(self):
-                return iter(self.values)
-
-            def __getitem__(self, key):
-                return self.values.get(key, '')
-
-            def __contains__(self, name):
-                return name in self.values
-
-            def names(self):
-                return self.values.keys()
-
-        mock_mgr.return_value = FakeDispatcher()
-        res = event.load_dispatcher()
-
-        self.assertIsNone(res)
-        mock_mgr.assert_called_once_with(
-            namespace='senlin.dispatchers',
-            names=cfg.CONF.event_dispatchers,
-            invoke_on_load=True,
-            propagate_map_exceptions=True)
-
-    def test_event_data(self):
-        entity = mock.Mock(id='ENTITY_ID')
-        entity.name = 'FAKE_ENTITY'
-        action = mock.Mock(id='ACTION_ID', action='ACTION', entity=entity)
-
-        res = event._event_data(action)
-
-        self.assertEqual({'name': 'FAKE_ENTITY', 'obj_id': 'ENTITY_I',
-                          'action': 'ACTION', 'phase': None, 'reason': None,
-                          'id': 'ACTION_I'},
-                         res)
-
-    def test_event_data_with_phase_reason(self):
-        entity = mock.Mock(id='ENTITY_ID')
-        entity.name = 'FAKE_ENTITY'
-        action = mock.Mock(id='ACTION_ID', action='ACTION', entity=entity)
-
-        res = event._event_data(action, phase='PHASE1', reason='REASON1')
-
-        self.assertEqual({'name': 'FAKE_ENTITY', 'id': 'ACTION_I',
-                          'action': 'ACTION', 'phase': 'PHASE1',
-                          'obj_id': 'ENTITY_I', 'reason': 'REASON1'},
-                         res)
-
-    def test_dump(self):
-        cfg.CONF.set_override('debug', True)
-        saved_dispathers = event.dispatchers
-        event.dispatchers = mock.Mock()
-        action = mock.Mock(cause=consts.CAUSE_RPC)
-        try:
-            event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')
-            event.dispatchers.map_method.assert_called_once_with(
-                'dump', logging.INFO, action,
-                phase='Phase1', reason='Reason1', timestamp='TS1')
-        finally:
-            event.dispatchers = saved_dispathers
-
-    def test_dump_without_timestamp(self):
-        cfg.CONF.set_override('debug', True)
-        saved_dispathers = event.dispatchers
-        event.dispatchers = mock.Mock()
-        action = mock.Mock(cause=consts.CAUSE_RPC)
-        try:
-            event._dump(logging.INFO, action, 'Phase1', 'Reason1', None)
-
-            event.dispatchers.map_method.assert_called_once_with(
-                'dump', logging.INFO, action,
-                phase='Phase1', reason='Reason1', timestamp=mock.ANY)
-        finally:
-            event.dispatchers = saved_dispathers
-
-    def test_dump_guarded(self):
-        cfg.CONF.set_override('debug', False)
-        cfg.CONF.set_override('priority', 'warning', group='dispatchers')
-        saved_dispathers = event.dispatchers
-        event.dispatchers = mock.Mock()
-        action = mock.Mock(cause=consts.CAUSE_RPC)
-        try:
-            event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')
-            # (temporary)Remove map_method.call_count for coverage test
-            # self.assertEqual(0, event.dispatchers.map_method.call_count)
-        finally:
-            event.dispatchers = saved_dispathers
-
-    def test_dump_exclude_derived_actions_positive(self):
-        cfg.CONF.set_override('exclude_derived_actions', True,
-                              group='dispatchers')
-        saved_dispathers = event.dispatchers
-        event.dispatchers = mock.Mock()
-        action = mock.Mock(cause=consts.CAUSE_DERIVED)
-        try:
-            event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')
-
-            self.assertEqual(0, event.dispatchers.map_method.call_count)
-        finally:
-            event.dispatchers = saved_dispathers
-
-    def test_dump_exclude_derived_actions_negative(self):
-        cfg.CONF.set_override('exclude_derived_actions', False,
-                              group='dispatchers')
-        saved_dispathers = event.dispatchers
-        event.dispatchers = mock.Mock()
-        action = mock.Mock(cause=consts.CAUSE_DERIVED)
-        try:
-            event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')
-
-            event.dispatchers.map_method.assert_called_once_with(
-                'dump', logging.INFO, action,
-                phase='Phase1', reason='Reason1', timestamp='TS1')
-        finally:
-            event.dispatchers = saved_dispathers
-
-    def test_dump_with_exception(self):
-        cfg.CONF.set_override('debug', True)
-        saved_dispathers = event.dispatchers
-        event.dispatchers = mock.Mock()
-        event.dispatchers.map_method.side_effect = Exception('fab')
-        action = mock.Mock(cause=consts.CAUSE_RPC)
-        try:
-            res = event._dump(logging.INFO, action, 'Phase1', 'Reason1', 'TS1')
-
-            self.assertIsNone(res)  # exception logged only
-            event.dispatchers.map_method.assert_called_once_with(
-                'dump', logging.INFO, action,
-                phase='Phase1', reason='Reason1', timestamp='TS1')
-        finally:
-            event.dispatchers = saved_dispathers
-
-
-@mock.patch.object(event, '_dump')
-class TestLogMethods(testtools.TestCase):
-
-    def test_critical(self, mock_dump):
-        entity = mock.Mock(id='1234567890')
-        entity.name = 'fake_obj'
-        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')
-
-        res = event.critical(action, 'P1', 'R1', 'TS1')
-
-        self.assertIsNone(res)
-        mock_dump.assert_called_once_with(logging.CRITICAL, action,
-                                          'P1', 'R1', 'TS1')
-
-    def test_error(self, mock_dump):
-        entity = mock.Mock(id='1234567890')
-        entity.name = 'fake_obj'
-        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')
-
-        res = event.error(action, 'P1', 'R1', 'TS1')
-
-        self.assertIsNone(res)
-        mock_dump.assert_called_once_with(logging.ERROR, action,
-                                          'P1', 'R1', 'TS1')
-
-    def test_warning(self, mock_dump):
-        entity = mock.Mock(id='1234567890')
-        entity.name = 'fake_obj'
-        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')
-
-        res = event.warning(action, 'P1', 'R1', 'TS1')
-
-        self.assertIsNone(res)
-        mock_dump.assert_called_once_with(logging.WARNING, action,
-                                          'P1', 'R1', 'TS1')
-
-    def test_info(self, mock_dump):
-        entity = mock.Mock(id='1234567890')
-        entity.name = 'fake_obj'
-        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')
-
-        res = event.info(action, 'P1', 'R1', 'TS1')
-
-        self.assertIsNone(res)
-        mock_dump.assert_called_once_with(logging.INFO, action,
-                                          'P1', 'R1', 'TS1')
-
-    def test_debug(self, mock_dump):
-        entity = mock.Mock(id='1234567890')
-        entity.name = 'fake_obj'
-        action = mock.Mock(id='FAKE_ID', entity=entity, action='ACTION_NAME')
-
-        res = event.debug(action, 'P1', 'R1', 'TS1')
-
-        self.assertIsNone(res)
-        mock_dump.assert_called_once_with(logging.DEBUG, action,
-                                          'P1', 'R1', 'TS1')
diff --git a/senlin/tests/unit/engine/test_health_manager.py b/senlin/tests/unit/engine/test_health_manager.py
deleted file mode 100644
index 783277454..000000000
--- a/senlin/tests/unit/engine/test_health_manager.py
+++ /dev/null
@@ -1,1619 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-import time
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_utils import timeutils as tu
-
-from senlin.common import consts
-from senlin.common import context
-from senlin.common import exception as exc
-from senlin.common import utils
-from senlin.engine import health_manager as hm
-from senlin.engine import node as node_mod
-from senlin.engine.notifications import nova_endpoint
-from senlin import objects
-from senlin.objects import cluster as obj_cluster
-from senlin.objects import node as obj_node
-from senlin.objects import profile as obj_profile
-from senlin.rpc import client as rpc_client
-from senlin.tests.unit.common import base
-
-
-class TestChaseUp(base.SenlinTestCase):
-
-    def test_less_than_one_interval(self):
-        start = tu.utcnow(True)
-        # we assume that the delay before next line is < 5 seconds
-        res = hm.chase_up(start, 5)
-
-        self.assertLessEqual(res, 5)
-
-    def test_more_than_one_interval(self):
-        start = tu.utcnow(True)
-        time.sleep(2)
-
-        # we assume that the delay before next line is < 5 seconds
-        res = hm.chase_up(start, 1)
-
-        self.assertLessEqual(res, 1)
-
-
-@mock.patch('oslo_messaging.NotificationFilter')
-class TestNovaNotificationEndpoint(base.SenlinTestCase):
-
-    @mock.patch('senlin.rpc.client.get_engine_client')
-    def test_init(self, mock_rpc, mock_filter):
-        x_filter = mock_filter.return_value
-        event_map = {
-            'compute.instance.pause.end': 'PAUSE',
-            'compute.instance.power_off.end': 'POWER_OFF',
-            'compute.instance.rebuild.error': 'REBUILD',
-            'compute.instance.shutdown.end': 'SHUTDOWN',
-            'compute.instance.soft_delete.end': 'SOFT_DELETE',
-        }
-        recover_action = {'operation': 'REBUILD'}
-        endpoint = nova_endpoint.NovaNotificationEndpoint(
-            'PROJECT', 'CLUSTER_ID', recover_action
-        )
-
-        mock_filter.assert_called_once_with(
-            publisher_id='^compute.*',
-            event_type='^compute\.instance\..*',
-            context={'project_id': '^PROJECT$'})
-        mock_rpc.assert_called_once_with()
-        self.assertEqual(x_filter, endpoint.filter_rule)
-        self.assertEqual(mock_rpc.return_value, endpoint.rpc)
-        for e in event_map:
-            self.assertIn(e, endpoint.VM_FAILURE_EVENTS)
-            self.assertEqual(event_map[e], endpoint.VM_FAILURE_EVENTS[e])
-        self.assertEqual('PROJECT', endpoint.project_id)
-        self.assertEqual('CLUSTER_ID', endpoint.cluster_id)
-
-    @mock.patch.object(context.RequestContext, 'from_dict')
-    @mock.patch('senlin.rpc.client.get_engine_client')
-    def test_info(self, mock_rpc, mock_context, mock_filter):
-        x_rpc = mock_rpc.return_value
-        recover_action = {'operation': 'REBUILD'}
-        endpoint = nova_endpoint.NovaNotificationEndpoint(
-            'PROJECT', 'CLUSTER_ID', recover_action
-        )
-        ctx = mock.Mock()
-        payload = {
-            'metadata': {
-                'cluster_id': 'CLUSTER_ID',
-                'cluster_node_id': 'FAKE_NODE',
-                'cluster_node_index': '123',
-            },
-            'instance_id': 'PHYSICAL_ID',
-            'user_id': 'USER',
-            'state': 'shutoff',
-        }
-        metadata = {'timestamp': 'TIMESTAMP'}
-        call_ctx = mock.Mock()
-        mock_context.return_value = call_ctx
-
-        res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.shutdown.end',
-                            payload, metadata)
-
-        self.assertIsNone(res)
-        x_rpc.call.assert_called_once_with(call_ctx, 'node_recover', mock.ANY)
-        req = x_rpc.call.call_args[0][2]
-        self.assertIsInstance(req, objects.NodeRecoverRequest)
-        self.assertEqual('FAKE_NODE', req.identity)
-        expected_params = {
-            'event': 'SHUTDOWN',
-            'state': 'shutoff',
-            'instance_id': 'PHYSICAL_ID',
-            'timestamp': 'TIMESTAMP',
-            'publisher': 'PUBLISHER',
-            'operation': 'REBUILD'
-        }
-        self.assertEqual(expected_params, req.params)
-
-    @mock.patch('senlin.rpc.client.get_engine_client')
-    def test_info_no_metadata(self, mock_rpc, mock_filter):
-        x_rpc = mock_rpc.return_value
-        recover_action = {'operation': 'REBUILD'}
-        endpoint = nova_endpoint.NovaNotificationEndpoint(
-            'PROJECT', 'CLUSTER_ID', recover_action
-        )
-        ctx = mock.Mock()
-        payload = {'metadata': {}}
-        metadata = {'timestamp': 'TIMESTAMP'}
-
-        res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end',
-                            payload, metadata)
-
-        self.assertIsNone(res)
-        self.assertEqual(0, x_rpc.node_recover.call_count)
-
-    @mock.patch('senlin.rpc.client.get_engine_client')
-    def test_info_no_cluster_in_metadata(self, mock_rpc, mock_filter):
-        x_rpc = mock_rpc.return_value
-        recover_action = {'operation': 'REBUILD'}
-        endpoint = nova_endpoint.NovaNotificationEndpoint(
-            'PROJECT', 'CLUSTER_ID', recover_action
-        )
-        ctx = mock.Mock()
-        payload = {'metadata': {'foo': 'bar'}}
-        metadata = {'timestamp': 'TIMESTAMP'}
-
-        res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end',
-                            payload, metadata)
-
-        self.assertIsNone(res)
-        self.assertEqual(0, x_rpc.node_recover.call_count)
-
-    @mock.patch('senlin.rpc.client.get_engine_client')
-    def test_info_cluster_id_not_match(self, mock_rpc, mock_filter):
-        x_rpc = mock_rpc.return_value
-        recover_action = {'operation': 'REBUILD'}
-        endpoint = nova_endpoint.NovaNotificationEndpoint(
-            'PROJECT', 'CLUSTER_ID', recover_action
-        )
-        ctx = mock.Mock()
-        payload = {'metadata': {'cluster_id': 'FOOBAR'}}
-        metadata = {'timestamp': 'TIMESTAMP'}
-
-        res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end',
-                            payload, metadata)
-
-        self.assertIsNone(res)
-        self.assertEqual(0, x_rpc.node_recover.call_count)
-
-    @mock.patch('senlin.rpc.client.get_engine_client')
-    def test_info_event_type_not_interested(self, mock_rpc, mock_filter):
-        x_rpc = mock_rpc.return_value
-        recover_action = {'operation': 'REBUILD'}
-        endpoint = nova_endpoint.NovaNotificationEndpoint(
-            'PROJECT', 'CLUSTER_ID', recover_action
-        )
-        ctx = mock.Mock()
-        payload = {'metadata': {'cluster_id': 'CLUSTER_ID'}}
-        metadata = {'timestamp': 'TIMESTAMP'}
-
-        res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.start',
-                            payload, metadata)
-
-        self.assertIsNone(res)
-        self.assertEqual(0, x_rpc.node_recover.call_count)
-
-    @mock.patch('senlin.rpc.client.get_engine_client')
-    def test_info_no_node_id(self, mock_rpc, mock_filter):
-        x_rpc = mock_rpc.return_value
recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = {'metadata': {'cluster_id': 'CLUSTER_ID'}} - metadata = {'timestamp': 'TIMESTAMP'} - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.delete.end', - payload, metadata) - - self.assertIsNone(res) - self.assertEqual(0, x_rpc.node_recover.call_count) - - @mock.patch.object(context.RequestContext, 'from_dict') - @mock.patch('senlin.rpc.client.get_engine_client') - def test_info_default_values(self, mock_rpc, mock_context, mock_filter): - x_rpc = mock_rpc.return_value - recover_action = {'operation': 'REBUILD'} - endpoint = nova_endpoint.NovaNotificationEndpoint( - 'PROJECT', 'CLUSTER_ID', recover_action - ) - ctx = mock.Mock() - payload = { - 'metadata': { - 'cluster_id': 'CLUSTER_ID', - 'cluster_node_id': 'NODE_ID' - }, - 'user_id': 'USER', - } - metadata = {'timestamp': 'TIMESTAMP'} - call_ctx = mock.Mock() - mock_context.return_value = call_ctx - - res = endpoint.info(ctx, 'PUBLISHER', 'compute.instance.shutdown.end', - payload, metadata) - - self.assertIsNone(res) - x_rpc.call.assert_called_once_with(call_ctx, 'node_recover', mock.ANY) - req = x_rpc.call.call_args[0][2] - self.assertIsInstance(req, objects.NodeRecoverRequest) - self.assertEqual('NODE_ID', req.identity) - expected_params = { - 'event': 'SHUTDOWN', - 'state': 'Unknown', - 'instance_id': 'Unknown', - 'timestamp': 'TIMESTAMP', - 'publisher': 'PUBLISHER', - 'operation': 'REBUILD', - } - self.assertEqual(expected_params, req.params) - - -@mock.patch( - 'senlin.engine.notifications.heat_endpoint.HeatNotificationEndpoint') -@mock.patch( - 'senlin.engine.notifications.nova_endpoint.NovaNotificationEndpoint') -@mock.patch('oslo_messaging.get_notification_transport') -@mock.patch('oslo_messaging.get_notification_listener') -class TestListenerProc(base.SenlinTestCase): - - def test_listener_proc_nova(self, mock_listener, mock_transport, - mock_novaendpoint, mock_heatendpoint): - cfg.CONF.set_override('nova_control_exchange', 'FAKE_EXCHANGE', - group='health_manager') - - x_listener = mock.Mock() - mock_listener.return_value = x_listener - x_transport = mock.Mock() - mock_transport.return_value = x_transport - x_endpoint = mock.Mock() - mock_novaendpoint.return_value = x_endpoint - - recover_action = {'operation': 'REBUILD'} - res = hm.ListenerProc('FAKE_EXCHANGE', 'PROJECT_ID', 'CLUSTER_ID', - recover_action) - - self.assertIsNone(res) - mock_transport.assert_called_once_with(cfg.CONF) - mock_novaendpoint.assert_called_once_with('PROJECT_ID', 'CLUSTER_ID', - recover_action) - mock_listener.assert_called_once_with( - x_transport, [mock_novaendpoint().target], [x_endpoint], - executor='threading', pool="senlin-listeners") - x_listener.start.assert_called_once_with() - - def test_listener_proc_heat(self, mock_listener, mock_transport, - mock_novaendpoint, mock_heatendpoint): - x_listener = mock.Mock() - mock_listener.return_value = x_listener - x_transport = mock.Mock() - mock_transport.return_value = x_transport - x_endpoint = mock.Mock() - mock_heatendpoint.return_value = x_endpoint - - recover_action = {'operation': 'REBUILD'} - res = hm.ListenerProc('heat', 'PROJECT_ID', 'CLUSTER_ID', - recover_action) - - self.assertIsNone(res) - mock_transport.assert_called_once_with(cfg.CONF) - mock_heatendpoint.assert_called_once_with('PROJECT_ID', 'CLUSTER_ID', - recover_action) - mock_listener.assert_called_once_with( - x_transport, [mock_heatendpoint().target], 
[x_endpoint], - executor='threading', pool="senlin-listeners") - x_listener.start.assert_called_once_with() - - -class TestHealthCheckType(base.SenlinTestCase): - def setUp(self): - super(TestHealthCheckType, self).setUp() - - self.hc = hm.NodePollStatusHealthCheck( - cluster_id='CLUSTER_ID', interval=1, node_update_timeout=1, - params='' - ) - - def test_factory(self): - cid = 'CLUSTER_ID' - interval = 1 - params = { - 'detection_modes': [ - { - 'type': 'NODE_STATUS_POLLING', - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': '', - 'poll_url_retry_interval': '' - }, - { - 'type': 'HYPERVISOR_STATUS_POLLING', - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': '', - 'poll_url_retry_interval': '' - }, - { - 'type': 'NODE_STATUS_POLL_URL', - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': '', - 'poll_url_retry_interval': '' - } - ], - 'node_update_timeout': 300, - } - - for d in params['detection_modes']: - hc = hm.HealthCheckType.factory(d['type'], cid, interval, params) - - self.assertEqual(cid, hc.cluster_id) - self.assertEqual(interval, hc.interval) - self.assertEqual(d, hc.params) - self.assertEqual( - params['node_update_timeout'], hc.node_update_timeout) - - def test_factory_invalid_type(self): - cid = 'CLUSTER_ID' - interval = 1 - params = { - 'detection_modes': [ - { - 'type': 'blah', - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': '', - 'poll_url_retry_interval': '' - }, - ], - 'node_update_timeout': 300, - } - - with self.assertRaisesRegex(Exception, 'Invalid detection type: blah'): - hm.HealthCheckType.factory('blah', cid, interval, params) - - def test_factory_same_type_twice(self): - cid = 'CLUSTER_ID' - interval = 1 - params = { - 'detection_modes': [ - { - 'type': 'NODE_STATUS_POLLING', - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': '', - 'poll_url_retry_interval': '' - }, - { - 'type': 'NODE_STATUS_POLLING', - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': '', - 'poll_url_retry_interval': '' - } - ], - 'node_update_timeout': 300, - } - - with self.assertRaisesRegex( - Exception, - '.*Encountered 2 instances of type NODE_STATUS_POLLING'): - hm.HealthCheckType.factory( - 'NODE_STATUS_POLLING', cid, interval, params) - - -class TestNodePollStatusHealthCheck(base.SenlinTestCase): - def setUp(self): - super(TestNodePollStatusHealthCheck, self).setUp() - - self.hc = hm.NodePollStatusHealthCheck( - cluster_id='CLUSTER_ID', - interval=1, node_update_timeout=1, params='' - ) - - @mock.patch.object(node_mod.Node, '_from_object') - @mock.patch.object(tu, 'is_older_than') - def test_run_health_check_healthy(self, mock_tu, mock_node_obj): - x_entity = mock.Mock() - x_entity.do_healthcheck.return_value = True - mock_node_obj.return_value = x_entity - - ctx = mock.Mock() - node = mock.Mock(id='FAKE_NODE1', status="ERROR", - updated_at='2018-08-13 18:00:00', - init_at='2018-08-13 17:00:00') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - 
mock_tu.assert_not_called() - - @mock.patch.object(node_mod.Node, '_from_object') - @mock.patch.object(tu, 'is_older_than') - def test_run_health_check_healthy_internal_error( - self, mock_tu, mock_node_obj): - x_entity = mock.Mock() - x_entity.do_healthcheck.side_effect = exc.InternalError( - message='error') - mock_node_obj.return_value = x_entity - - ctx = mock.Mock() - node = mock.Mock(id='FAKE_NODE1', status="ERROR", - updated_at='2018-08-13 18:00:00', - init_at='2018-08-13 17:00:00') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_tu.assert_not_called() - - @mock.patch.object(node_mod.Node, '_from_object') - @mock.patch.object(tu, 'is_older_than') - def test_run_health_check_unhealthy(self, mock_tu, mock_node_obj): - x_entity = mock.Mock() - x_entity.do_healthcheck.return_value = False - mock_node_obj.return_value = x_entity - - mock_tu.return_value = True - - ctx = mock.Mock() - node = mock.Mock(id='FAKE_NODE1', status="ERROR", - updated_at='2018-08-13 18:00:00', - init_at='2018-08-13 17:00:00') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertFalse(res) - mock_tu.assert_called_once_with(node.updated_at, 1) - - @mock.patch.object(node_mod.Node, '_from_object') - @mock.patch.object(tu, 'is_older_than') - def test_run_health_check_unhealthy_within_timeout( - self, mock_tu, mock_node_obj): - x_entity = mock.Mock() - x_entity.do_healthcheck.return_value = False - mock_node_obj.return_value = x_entity - - mock_tu.return_value = False - - ctx = mock.Mock() - node = mock.Mock(id='FAKE_NODE1', status="ERROR", - updated_at='2018-08-13 18:00:00', - init_at='2018-08-13 17:00:00') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_tu.assert_called_once_with(node.updated_at, 1) - - -class TestHypervisorPollStatusHealthCheck(base.SenlinTestCase): - def setUp(self): - super(TestHypervisorPollStatusHealthCheck, self).setUp() - - self.hc = hm.HypervisorPollStatusHealthCheck( - cluster_id='CLUSTER_ID', - interval=1, node_update_timeout=1, params='' - ) - - @mock.patch.object(node_mod.Node, '_from_object') - @mock.patch.object(tu, 'is_older_than') - def test_run_health_check_healthy(self, mock_tu, mock_node_obj): - x_entity = mock.Mock() - x_entity.do_healthcheck.return_value = True - mock_node_obj.return_value = x_entity - - ctx = mock.Mock() - node = mock.Mock(id='FAKE_NODE1', status="ERROR", - updated_at='2018-08-13 18:00:00', - init_at='2018-08-13 17:00:00') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_tu.assert_not_called() - - @mock.patch.object(node_mod.Node, '_from_object') - @mock.patch.object(tu, 'is_older_than') - def test_run_health_check_healthy_internal_error( - self, mock_tu, mock_node_obj): - x_entity = mock.Mock() - x_entity.do_healthcheck.side_effect = exc.InternalError( - message='error') - mock_node_obj.return_value = x_entity - - ctx = mock.Mock() - node = mock.Mock(id='FAKE_NODE1', status="ERROR", - updated_at='2018-08-13 18:00:00', - init_at='2018-08-13 17:00:00') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_tu.assert_not_called() - - @mock.patch.object(node_mod.Node, '_from_object') - @mock.patch.object(tu, 'is_older_than') - def test_run_health_check_unhealthy(self, mock_tu, mock_node_obj): - x_entity = mock.Mock() - x_entity.do_healthcheck.return_value = False - mock_node_obj.return_value = x_entity - - mock_tu.return_value = True - - ctx = mock.Mock() - node = mock.Mock(id='FAKE_NODE1', 
status="ERROR", - updated_at='2018-08-13 18:00:00', - init_at='2018-08-13 17:00:00') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertFalse(res) - mock_tu.assert_called_once_with(node.updated_at, 1) - - @mock.patch.object(node_mod.Node, '_from_object') - @mock.patch.object(tu, 'is_older_than') - def test_run_health_check_unhealthy_within_timeout( - self, mock_tu, mock_node_obj): - x_entity = mock.Mock() - x_entity.do_healthcheck.return_value = False - mock_node_obj.return_value = x_entity - - mock_tu.return_value = False - - ctx = mock.Mock() - node = mock.Mock(id='FAKE_NODE1', status="ERROR", - updated_at='2018-08-13 18:00:00', - init_at='2018-08-13 17:00:00') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_tu.assert_called_once_with(node.updated_at, 1) - - -class TestNodePollUrlHealthCheck(base.SenlinTestCase): - def setUp(self): - super(TestNodePollUrlHealthCheck, self).setUp() - - default_params = { - 'poll_url': 'FAKE_POLL_URL', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': 'FAKE_HEALTHY_PATTERN', - 'poll_url_retry_limit': 2, - 'poll_url_retry_interval': 1, - 'node_update_timeout': 5 - } - - self.hc = hm.NodePollUrlHealthCheck( - cluster_id='CLUSTER_ID', interval=1, node_update_timeout=1, - params=default_params - ) - - def test_expand_url_template(self): - url_template = 'https://abc123/foo/bar' - node = mock.Mock() - - # do it - res = self.hc._expand_url_template(url_template, node) - - self.assertEqual(res, url_template) - - def test_expand_url_template_nodename(self): - node = mock.Mock() - node.name = 'name' - url_template = 'https://abc123/{nodename}/bar' - expanded_url = 'https://abc123/{}/bar'.format(node.name) - - # do it - res = self.hc._expand_url_template(url_template, node) - - self.assertEqual(res, expanded_url) - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_healthy( - self, mock_url_fetch, mock_expand_url, mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.status = consts.NS_ACTIVE - mock_time.return_value = True - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.return_value = ("Healthy because this return value " - "contains FAKE_HEALTHY_PATTERN") - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1, - verify=True) - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_healthy_min_timeout( - self, mock_url_fetch, mock_expand_url, mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.status = consts.NS_ACTIVE - mock_time.return_value = True - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.return_value = ("Healthy because this return value " - "contains FAKE_HEALTHY_PATTERN") - - self.hc.params['poll_url_retry_interval'] = 0 - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1, - verify=True) - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_healthy_timeout( - self, mock_url_fetch, mock_expand_url, 
mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.status = consts.NS_ACTIVE - mock_time.return_value = True - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.return_value = ("Healthy because this return value " - "contains FAKE_HEALTHY_PATTERN") - - self.hc.params['poll_url_retry_interval'] = 100 - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=10, - verify=True) - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_unhealthy_inactive( - self, mock_url_fetch, mock_expand_url, mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.status = consts.NS_RECOVERING - mock_time.return_value = True - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.return_value = "" - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_url_fetch.assert_not_called() - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_unhealthy_update_timeout( - self, mock_url_fetch, mock_expand_url, mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.id = 'FAKE_NODE_ID' - node.updated_at = 'FAKE_UPDATE_TIME' - node.status = consts.NS_ACTIVE - mock_time.return_value = False - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.return_value = "" - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_url_fetch.assert_has_calls( - [mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True)]) - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_unhealthy_init_timeout( - self, mock_url_fetch, mock_expand_url, mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.id = 'FAKE_NODE_ID' - node.updated_at = None - node.init_at = 'FAKE_INIT_TIME' - node.status = consts.NS_ACTIVE - mock_time.return_value = False - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.return_value = "" - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_url_fetch.assert_has_calls( - [mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True)]) - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_unhealthy(self, mock_url_fetch, mock_expand_url, - mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.status = consts.NS_ACTIVE - node.id = 'FAKE_ID' - mock_time.return_value = True - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.return_value = "" - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertFalse(res) - mock_url_fetch.assert_has_calls( - [ - mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True), - mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True) - ] - ) - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_conn_error(self, - mock_url_fetch, - mock_expand_url, mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.status = consts.NS_ACTIVE - node.id = 'FAKE_ID' - 
mock_time.return_value = True - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.side_effect = utils.URLFetchError("Error") - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertFalse(res) - mock_url_fetch.assert_has_calls( - [ - mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True), - mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True) - ] - ) - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_conn_other_error(self, - mock_url_fetch, - mock_expand_url, mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.status = consts.NS_ACTIVE - node.id = 'FAKE_ID' - mock_time.return_value = True - mock_expand_url.side_effect = Exception('blah') - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_url_fetch.assert_not_called() - - @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") - @mock.patch.object(utils, 'url_fetch') - def test_run_health_check_conn_error_noop( - self, mock_url_fetch, mock_expand_url, mock_time): - ctx = mock.Mock() - node = mock.Mock() - node.status = consts.NS_ACTIVE - node.id = 'FAKE_ID' - mock_time.return_value = True - mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - mock_url_fetch.side_effect = utils.URLFetchError("Error") - - self.hc.params['poll_url_conn_error_as_unhealthy'] = False - - # do it - res = self.hc.run_health_check(ctx, node) - - self.assertTrue(res) - mock_url_fetch.assert_has_calls( - [ - mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True), - ] - ) - - -class TestHealthCheck(base.SenlinTestCase): - - def setUp(self): - super(TestHealthCheck, self).setUp() - ctx = mock.Mock() - self.fake_rpc = mock.Mock() - with mock.patch.object(rpc_client, 'get_engine_client', - return_value=self.fake_rpc): - self.hc = hm.HealthCheck( - ctx=ctx, - engine_id='ENGINE_ID', - cluster_id='CID', - check_type=consts.NODE_STATUS_POLLING, - interval=60, - node_update_timeout=60, - params={ - 'node_update_timeout': 60, - 'detection_modes': [ - {'type': consts.NODE_STATUS_POLLING} - ], - 'recovery_conditional': consts.ANY_FAILED - }, - enabled=True) - - def test_get_health_check_types_polling(self): - self.hc.get_health_check_types() - self.assertEqual(consts.POLLING, self.hc.type) - - def test_get_health_check_types_events(self): - self.hc.check_type = consts.LIFECYCLE_EVENTS - self.hc.get_health_check_types() - self.assertEqual(consts.EVENTS, self.hc.type) - - def test_get_recover_actions(self): - self.hc.params = { - 'node_delete_timeout': 60, - 'node_force_recreate': True, - 'recover_action': [{'name': 'FAKE_RECOVER_ACTION'}] - } - self.hc.get_recover_actions() - self.assertEqual(self.hc.params['node_delete_timeout'], - self.hc.recover_action['delete_timeout']) - self.assertEqual(self.hc.params['node_force_recreate'], - self.hc.recover_action['force_recreate']) - self.assertEqual(self.hc.params['recover_action'][0]['name'], - self.hc.recover_action['operation']) - - @mock.patch.object(obj_node.Node, 'get_all_by_cluster') - @mock.patch.object(hm.HealthCheck, "_recover_node") - @mock.patch.object(hm.HealthCheck, "_wait_for_action") - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(context, 'get_service_context') - def test_execute_health_check_any_mode_healthy( - self, mock_ctx, mock_get, mock_wait, mock_recover, mock_nodes): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID', 
- id='CID') - mock_get.return_value = x_cluster - - ctx = mock.Mock() - mock_ctx.return_value = ctx - - mock_wait.return_value = (True, "") - - x_node1 = mock.Mock(id='FAKE_NODE1', status="ERROR") - x_node2 = mock.Mock(id='FAKE_NODE2', status="ERROR") - mock_nodes.return_value = [x_node1, x_node2] - - hc_true = {'run_health_check.return_value': True} - - hc_test_values = [ - [ - mock.Mock(**hc_true), - mock.Mock(**hc_true), - mock.Mock(**hc_true), - ], - ] - - for hc_mocks in hc_test_values: - self.hc.health_check_types = hc_mocks - - mock_get.reset_mock() - mock_ctx.reset_mock() - mock_recover.reset_mock() - mock_wait.reset_mock() - - # do it - self.hc.execute_health_check() - - mock_get.assert_called_once_with(self.hc.ctx, 'CID', - project_safe=False) - mock_ctx.assert_called_once_with(user_id=x_cluster.user, - project_id=x_cluster.project) - - for mock_hc in hc_mocks: - mock_hc.run_health_check.assert_has_calls( - [ - mock.call(ctx, x_node1), - mock.call(ctx, x_node2) - ] - ) - - mock_recover.assert_not_called() - mock_wait.assert_not_called() - - @mock.patch.object(obj_node.Node, 'get_all_by_cluster') - @mock.patch.object(hm.HealthCheck, "_recover_node") - @mock.patch.object(hm.HealthCheck, "_wait_for_action") - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(context, 'get_service_context') - def test_execute_health_check_all_mode_unhealthy( - self, mock_ctx, mock_get, mock_wait, mock_recover, mock_nodes): - self.hc.cluster_id = 'CLUSTER_ID' - self.hc.interval = 1 - self.hc.recovery_cond = consts.ALL_FAILED - self.hc.node_update_timeout = 1 - self.hc.recovery_action = {'operation': 'REBUILD'} - - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID', - id='CLUSTER_ID') - mock_get.return_value = x_cluster - - ctx = mock.Mock() - mock_ctx.return_value = ctx - - mock_wait.return_value = (True, "") - - x_node = mock.Mock(id='FAKE_NODE', status="ERROR") - mock_nodes.return_value = [x_node] - - mock_recover.return_value = {'action': 'FAKE_ACTION_ID'} - - hc_false = {'run_health_check.return_value': False} - - hc_test_values = [ - [ - mock.Mock(**hc_false), - ] - ] - - for hc_mocks in hc_test_values: - self.hc.health_check_types = hc_mocks - - mock_get.reset_mock() - mock_ctx.reset_mock() - mock_recover.reset_mock() - mock_wait.reset_mock() - - # do it - self.hc.execute_health_check() - - mock_get.assert_called_once_with(self.hc.ctx, 'CLUSTER_ID', - project_safe=False) - mock_ctx.assert_called_once_with(user_id=x_cluster.user, - project_id=x_cluster.project) - - for mock_hc in hc_mocks: - mock_hc.run_health_check.assert_has_calls( - [ - mock.call(ctx, x_node) - ] - ) - - mock_recover.assert_called_once_with(ctx, 'FAKE_NODE') - mock_wait.assert_called_once_with( - ctx, 'FAKE_ACTION_ID', self.hc.node_update_timeout) - - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(context, 'get_service_context') - def test_execute_health_check_cluster_not_found(self, mock_ctx, mock_get): - mock_get.return_value = None - - self.hc.execute_health_check() - - mock_ctx.assert_not_called() - - @mock.patch.object(hm.HealthCheck, "_recover_node") - def test_check_node_health_any_failed(self, mock_recover): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID', - id='CLUSTER_ID') - x_node = mock.Mock(id='FAKE_NODE', status="ERROR") - ctx = mock.Mock() - - self.hc.params['recovery_conditional'] = consts.ANY_FAILED - mock_hc_1 = mock.Mock() - mock_hc_1.run_health_check.return_value = True - mock_hc_2 = mock.Mock() - mock_hc_2.run_health_check.return_value = False - - 
self.hc.health_check_types = [mock_hc_1, mock_hc_2] - - self.hc._check_node_health(ctx, x_node, x_cluster) - - mock_hc_1.run_health_check.assert_called_once_with(ctx, x_node) - mock_hc_2.run_health_check.assert_called_once_with(ctx, x_node) - mock_recover.assert_called_once_with(ctx, x_node.id) - - @mock.patch.object(hm.HealthCheck, "_recover_node") - def test_check_node_health_all_failed(self, mock_recover): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID', - id='CLUSTER_ID') - x_node = mock.Mock(id='FAKE_NODE', status="ERROR") - ctx = mock.Mock() - - self.hc.params['recovery_conditional'] = consts.ALL_FAILED - mock_hc_1 = mock.Mock() - mock_hc_1.run_health_check.return_value = False - mock_hc_2 = mock.Mock() - mock_hc_2.run_health_check.return_value = False - - self.hc.health_check_types = [mock_hc_1, mock_hc_2] - - self.hc._check_node_health(ctx, x_node, x_cluster) - - mock_hc_1.run_health_check.assert_called_once_with(ctx, x_node) - mock_hc_2.run_health_check.assert_called_once_with(ctx, x_node) - mock_recover.assert_called_once_with(ctx, x_node.id) - - @mock.patch.object(hm.HealthCheck, "_recover_node") - def test_check_node_health_all_failed_negative(self, mock_recover): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID', - id='CLUSTER_ID') - x_node = mock.Mock(id='FAKE_NODE', status="ERROR") - ctx = mock.Mock() - - self.hc.params['recovery_conditional'] = consts.ALL_FAILED - mock_hc_1 = mock.Mock() - mock_hc_1.run_health_check.return_value = False - mock_hc_2 = mock.Mock() - mock_hc_2.run_health_check.return_value = True - - self.hc.health_check_types = [mock_hc_1, mock_hc_2] - - self.hc._check_node_health(ctx, x_node, x_cluster) - - mock_hc_1.run_health_check.assert_called_once_with(ctx, x_node) - mock_hc_2.run_health_check.assert_called_once_with(ctx, x_node) - mock_recover.assert_not_called() - - @mock.patch('senlin.objects.ActionGetRequest') - def test_wait_for_action(self, mock_action_req): - x_req = mock.Mock() - mock_action_req.return_value = x_req - - x_action = {'status': consts.ACTION_SUCCEEDED} - self.fake_rpc.call.return_value = x_action - - ctx = mock.Mock() - action_id = 'FAKE_ACTION_ID' - timeout = 5 - - # do it - res, err = self.hc._wait_for_action(ctx, action_id, timeout) - - self.assertTrue(res) - self.assertEqual(err, '') - self.fake_rpc.call.assert_called_with(ctx, 'action_get', x_req) - - @mock.patch('senlin.objects.ActionGetRequest') - def test_wait_for_action_success_before_timeout(self, mock_action_req): - x_req = mock.Mock() - mock_action_req.return_value = x_req - - x_action1 = {'status': consts.ACTION_RUNNING} - x_action2 = {'status': consts.ACTION_SUCCEEDED} - self.fake_rpc.call.side_effect = [x_action1, x_action2] - - ctx = mock.Mock() - action_id = 'FAKE_ACTION_ID' - timeout = 5 - - # do it - res, err = self.hc._wait_for_action(ctx, action_id, timeout) - - self.assertTrue(res) - self.assertEqual(err, '') - self.fake_rpc.call.assert_has_calls( - [ - mock.call(ctx, 'action_get', x_req), - mock.call(ctx, 'action_get', x_req) - ] - ) - - @mock.patch('senlin.objects.ActionGetRequest') - def test_wait_for_action_timeout(self, mock_action_req): - x_req = mock.Mock() - mock_action_req.return_value = x_req - - x_action = {'status': consts.ACTION_RUNNING} - self.fake_rpc.call.return_value = x_action - - ctx = mock.Mock() - action_id = 'FAKE_ACTION_ID' - timeout = 5 - - # do it - res, err = self.hc._wait_for_action(ctx, action_id, timeout) - - self.assertFalse(res) - self.assertTrue(re.search('timeout', err, re.IGNORECASE)) - 
self.fake_rpc.call.assert_has_calls( - [ - mock.call(ctx, 'action_get', x_req) - ] - ) - - @mock.patch('senlin.objects.ActionGetRequest') - def test_wait_for_action_failed(self, mock_action_req): - x_req = mock.Mock() - mock_action_req.return_value = x_req - - x_action = {'status': consts.ACTION_FAILED} - self.fake_rpc.call.return_value = x_action - - ctx = mock.Mock() - action_id = 'FAKE_ACTION_ID' - timeout = 5 - - # do it - res, err = self.hc._wait_for_action(ctx, action_id, timeout) - - self.assertFalse(res) - self.assertEqual(err, 'Cluster check action failed or cancelled') - self.fake_rpc.call.assert_called_with(ctx, 'action_get', x_req) - - @mock.patch('senlin.objects.ActionGetRequest') - def test_wait_for_action_cancelled(self, mock_action_req): - x_req = mock.Mock() - mock_action_req.return_value = x_req - - x_action = {'status': consts.ACTION_CANCELLED} - self.fake_rpc.call.return_value = x_action - - ctx = mock.Mock() - action_id = 'FAKE_ACTION_ID' - timeout = 5 - - # do it - res, err = self.hc._wait_for_action(ctx, action_id, timeout) - - self.assertFalse(res) - self.assertEqual(err, 'Cluster check action failed or cancelled') - self.fake_rpc.call.assert_called_with(ctx, 'action_get', x_req) - - @mock.patch('senlin.objects.NodeRecoverRequest', autospec=True) - def test_recover_node(self, mock_req): - ctx = mock.Mock() - node_id = 'FAKE_NODE' - self.hc.recover_action = {'operation': 'REBUILD'} - - x_req = mock.Mock() - mock_req.return_value = x_req - - x_action = {'action': 'RECOVER_ID1'} - self.fake_rpc.call.return_value = x_action - - # do it - res = self.hc._recover_node(ctx, node_id) - - self.assertEqual(x_action, res) - mock_req.assert_called_once_with( - identity=node_id, params=self.hc.recover_action) - self.fake_rpc.call.assert_called_once_with(ctx, 'node_recover', x_req) - - @mock.patch('senlin.objects.NodeRecoverRequest', autospec=True) - def test_recover_node_failed(self, mock_req): - ctx = mock.Mock() - node_id = 'FAKE_NODE' - self.hc.recover_action = {'operation': 'REBUILD'} - - x_req = mock.Mock() - mock_req.return_value = x_req - - self.fake_rpc.call.side_effect = Exception('boom') - - # do it - res = self.hc._recover_node(ctx, node_id) - - self.assertIsNone(res) - mock_req.assert_called_once_with( - identity=node_id, params=self.hc.recover_action) - self.fake_rpc.call.assert_called_once_with(ctx, 'node_recover', x_req) - - @mock.patch('senlin.objects.HealthRegistry', autospec=True) - def test_db_create(self, mock_hrdb): - self.hc.db_create() - mock_hrdb.create.assert_called_once_with( - self.hc.ctx, self.hc.cluster_id, self.hc.check_type, - self.hc.interval, self.hc.params, self.hc.engine_id, - self.hc.enabled) - - @mock.patch('senlin.objects.HealthRegistry', autospec=True) - def test_db_delete(self, mock_hrdb): - self.hc.db_delete() - mock_hrdb.delete.assert_called_once_with(self.hc.ctx, - self.hc.cluster_id) - - @mock.patch('senlin.objects.HealthRegistry', autospec=True) - def test_enable(self, mock_hrdb): - self.hc.enable() - mock_hrdb.update.assert_called_once_with( - self.hc.ctx, self.hc.cluster_id, {'enabled': True}) - - @mock.patch('senlin.objects.HealthRegistry', autospec=True) - def test_disable(self, mock_hrdb): - self.hc.disable() - mock_hrdb.update.assert_called_once_with( - self.hc.ctx, self.hc.cluster_id, {'enabled': False}) - - -class TestRuntimeHealthRegistry(base.SenlinTestCase): - - def setUp(self): - super(TestRuntimeHealthRegistry, self).setUp() - - mock_ctx = mock.Mock() - self.mock_tg = mock.Mock() - self.rhr = hm.RuntimeHealthRegistry(mock_ctx,
'ENGINE_ID', - self.mock_tg) - - def create_mock_entry(self, ctx=None, engine_id='ENGINE_ID', - cluster_id='CID', - check_type=None, - interval=60, node_update_timeout=60, params=None, - enabled=True, timer=None, listener=None, - type=consts.POLLING): - mock_entry = mock.Mock( - ctx=ctx, - engine_id=engine_id, - cluster_id=cluster_id, - check_type=check_type, - interval=interval, - node_update_timeout=node_update_timeout, - params=params, - enabled=enabled, - timer=timer, - listener=listener, - execute_health_check=mock.Mock(), - type=type) - return mock_entry - - @mock.patch.object(hm, 'HealthCheck') - def test_register_cluster(self, mock_hc): - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING]) - mock_entry.db_create = mock.Mock() - mock_hc.return_value = mock_entry - - self.rhr.register_cluster('CID', 60, 60, {}) - - self.assertEqual(mock_entry, self.rhr.registries['CID']) - self.mock_tg.add_dynamic_timer.assert_called_once_with( - mock_entry.execute_health_check, None, None) - self.mock_tg.add_thread.assert_not_called() - mock_entry.db_create.assert_called_once_with() - - @mock.patch.object(hm, 'HealthCheck') - def test_register_cluster_failed(self, mock_hc): - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING]) - mock_entry.db_create = mock.Mock() - mock_entry.db_delete = mock.Mock() - - mock_hc.return_value = mock_entry - self.rhr.add_health_check = mock.Mock() - self.rhr.add_health_check.side_effect = Exception - - self.rhr.register_cluster('CID', 60, 60, {}) - - self.assertEqual(mock_entry, self.rhr.registries['CID']) - self.mock_tg.add_dynamic_timer.assert_not_called() - self.mock_tg.add_thread.assert_not_called() - mock_entry.db_create.assert_called_once_with() - mock_entry.db_delete.assert_called_once_with() - - def test_unregister_cluster_with_timer(self): - timer = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], - timer=timer) - self.rhr.registries['CID'] = mock_entry - mock_entry.db_delete = mock.Mock() - - self.rhr.unregister_cluster('CID') - - mock_entry.db_delete.assert_called_once_with() - timer.stop.assert_called_once_with() - self.mock_tg.timer_done.assert_called_once_with(timer) - self.assertIsNone(mock_entry.timer) - - def test_unregister_cluster_with_listener(self): - listener = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], - listener=listener) - self.rhr.registries['CID'] = mock_entry - mock_entry.db_delete = mock.Mock() - - self.rhr.unregister_cluster('CID') - - mock_entry.db_delete.assert_called_once_with() - listener.stop.assert_called_once_with() - self.mock_tg.thread_done.assert_called_once_with(listener) - self.assertIsNone(mock_entry.listener) - - def test_unregister_cluster_failed(self): - listener = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], - listener=listener) - self.rhr.registries['CID'] = mock_entry - mock_entry.db_delete.side_effect = Exception - - self.rhr.unregister_cluster('CID') - - listener.stop.assert_called_once_with() - self.mock_tg.thread_done.assert_called_once_with(listener) - self.assertIsNone(mock_entry.listener) - - def test_enable_cluster(self): - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], - enabled=False) - - def mock_enable(): - mock_entry.enabled = True - return True - - mock_entry.enable = mock_enable - - self.rhr.registries['CID'] = mock_entry - - self.rhr.enable_cluster('CID') - - 
self.assertTrue(mock_entry.enabled) - self.mock_tg.add_dynamic_timer.assert_called_once_with( - mock_entry.execute_health_check, None, None) - self.mock_tg.add_thread.assert_not_called() - - def test_enable_cluster_failed(self): - timer = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], - enabled=False, timer=timer) - mock_entry.enable = mock.Mock() - mock_entry.enable.side_effect = Exception - - self.rhr.registries['CID'] = mock_entry - - self.rhr.enable_cluster('CID') - - self.mock_tg.add_dynamic_timer.assert_not_called() - self.mock_tg.add_thread.assert_not_called() - timer.stop.assert_called_once_with() - self.mock_tg.timer_done.assert_called_once_with(timer) - - def test_disable_cluster(self): - timer = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], - enabled=True, timer=timer) - - def mock_disable(): - mock_entry.enabled = False - - mock_entry.disable = mock_disable - - self.rhr.registries['CID'] = mock_entry - - self.rhr.disable_cluster('CID') - - self.assertEqual(False, mock_entry.enabled) - - self.mock_tg.add_dynamic_timer.assert_not_called() - self.mock_tg.add_thread.assert_not_called() - timer.stop.assert_called_once_with() - self.mock_tg.timer_done.assert_called_once_with(timer) - - def test_disable_cluster_failed(self): - timer = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], - enabled=True, timer=timer) - - mock_entry.enable.side_effect = Exception - - self.rhr.registries['CID'] = mock_entry - - self.rhr.disable_cluster('CID') - - self.mock_tg.add_dynamic_timer.assert_not_called() - self.mock_tg.add_thread.assert_not_called() - timer.stop.assert_called_once_with() - self.mock_tg.timer_done.assert_called_once_with(timer) - - def test_add_timer(self): - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING]) - self.rhr.registries['CID'] = mock_entry - fake_timer = mock.Mock() - self.mock_tg.add_dynamic_timer = mock.Mock() - self.mock_tg.add_dynamic_timer.return_value = fake_timer - - self.rhr._add_timer('CID') - - self.assertEqual(fake_timer, mock_entry.timer) - self.mock_tg.add_dynamic_timer.assert_called_once_with( - mock_entry.execute_health_check, None, None) - - def test_add_timer_failed(self): - fake_timer = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], timer=fake_timer) - self.rhr.registries['CID'] = mock_entry - self.mock_tg.add_dynamic_timer = mock.Mock() - - self.rhr._add_timer('CID') - - self.assertEqual(fake_timer, mock_entry.timer) - self.mock_tg.add_dynamic_timer.assert_not_called() - - @mock.patch.object(obj_profile.Profile, 'get') - @mock.patch.object(obj_cluster.Cluster, 'get') - def test_add_listener_nova(self, mock_cluster, mock_profile): - cfg.CONF.set_override('nova_control_exchange', 'FAKE_NOVA_EXCHANGE', - group='health_manager') - mock_entry = self.create_mock_entry( - check_type=[consts.LIFECYCLE_EVENTS]) - self.rhr.registries['CID'] = mock_entry - fake_listener = mock.Mock() - x_cluster = mock.Mock(project='PROJECT_ID', profile_id='PROFILE_ID') - mock_cluster.return_value = x_cluster - x_profile = mock.Mock(type='os.nova.server-1.0') - mock_profile.return_value = x_profile - self.mock_tg.add_thread = mock.Mock() - self.mock_tg.add_thread.return_value = fake_listener - - self.rhr._add_listener('CID') - - mock_cluster.assert_called_once_with(self.rhr.ctx, 'CID', - project_safe=False) - mock_profile.assert_called_once_with(self.rhr.ctx, 
'PROFILE_ID', - project_safe=False) - self.mock_tg.add_thread.assert_called_once_with( - hm.ListenerProc, 'FAKE_NOVA_EXCHANGE', 'PROJECT_ID', 'CID', - mock_entry.recover_action) - - @mock.patch.object(obj_profile.Profile, 'get') - @mock.patch.object(obj_cluster.Cluster, 'get') - def test_add_listener_heat(self, mock_cluster, mock_profile): - cfg.CONF.set_override('heat_control_exchange', 'FAKE_HEAT_EXCHANGE', - group='health_manager') - mock_entry = self.create_mock_entry( - check_type=[consts.LIFECYCLE_EVENTS]) - self.rhr.registries['CID'] = mock_entry - fake_listener = mock.Mock() - x_cluster = mock.Mock(project='PROJECT_ID', profile_id='PROFILE_ID') - mock_cluster.return_value = x_cluster - x_profile = mock.Mock(type='os.heat.stack-1.0') - mock_profile.return_value = x_profile - self.mock_tg.add_thread = mock.Mock() - self.mock_tg.add_thread.return_value = fake_listener - - self.rhr._add_listener('CID') - - mock_cluster.assert_called_once_with(self.rhr.ctx, 'CID', - project_safe=False) - mock_profile.assert_called_once_with(self.rhr.ctx, 'PROFILE_ID', - project_safe=False) - self.mock_tg.add_thread.assert_called_once_with( - hm.ListenerProc, 'FAKE_HEAT_EXCHANGE', 'PROJECT_ID', 'CID', - mock_entry.recover_action) - - @mock.patch.object(obj_profile.Profile, 'get') - @mock.patch.object(obj_cluster.Cluster, 'get') - def test_add_listener_failed(self, mock_cluster, mock_profile): - cfg.CONF.set_override('heat_control_exchange', 'FAKE_HEAT_EXCHANGE', - group='health_manager') - fake_listener = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.LIFECYCLE_EVENTS], listener=fake_listener) - self.rhr.registries['CID'] = mock_entry - - x_cluster = mock.Mock(project='PROJECT_ID', profile_id='PROFILE_ID') - mock_cluster.return_value = x_cluster - x_profile = mock.Mock(type='os.heat.stack-1.0') - mock_profile.return_value = x_profile - self.mock_tg.add_thread = mock.Mock() - - self.rhr._add_listener('CID') - - mock_cluster.assert_not_called() - mock_profile.assert_not_called() - - self.mock_tg.add_thread.assert_not_called() - - def test_add_health_check_polling(self): - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING]) - self.rhr.registries['CID'] = mock_entry - self.rhr._add_timer = mock.Mock() - self.rhr._add_listener = mock.Mock() - - self.rhr.add_health_check(mock_entry) - - self.rhr._add_timer.assert_called_once_with('CID') - self.rhr._add_listener.assert_not_called() - - def test_add_health_check_events(self): - mock_entry = self.create_mock_entry( - check_type=[consts.LIFECYCLE_EVENTS], type=consts.EVENTS) - self.rhr.registries['CID'] = mock_entry - self.rhr._add_timer = mock.Mock() - self.rhr._add_listener = mock.Mock() - - self.rhr.add_health_check(mock_entry) - - self.rhr._add_timer.assert_not_called() - self.rhr._add_listener.assert_called_once_with('CID') - - def test_add_health_check_disabled(self): - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], enabled=False) - self.rhr.registries['CID'] = mock_entry - self.rhr._add_timer = mock.Mock() - self.rhr._add_listener = mock.Mock() - - self.rhr.add_health_check(mock_entry) - - self.rhr._add_timer.assert_not_called() - self.rhr._add_listener.assert_not_called() - - def test_add_health_check_timer_exists(self): - fake_timer = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], timer=fake_timer) - self.rhr.registries['CID'] = mock_entry - self.rhr._add_timer = mock.Mock() - self.rhr._add_listener = mock.Mock() - - 
self.rhr.add_health_check(mock_entry) - - self.rhr._add_timer.assert_not_called() - self.rhr._add_listener.assert_not_called() - - def test_remove_health_check_timer(self): - fake_timer = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], timer=fake_timer) - self.rhr.registries['CID'] = mock_entry - - self.rhr.remove_health_check(mock_entry) - - fake_timer.stop.assert_called_once_with() - self.mock_tg.timer_done.assert_called_once_with(fake_timer) - self.mock_tg.thread_done.assert_not_called() - self.assertIsNone(mock_entry.timer) - - def test_remove_health_check_listener(self): - fake_listener = mock.Mock() - mock_entry = self.create_mock_entry( - check_type=[consts.NODE_STATUS_POLLING], listener=fake_listener) - self.rhr.registries['CID'] = mock_entry - - self.rhr.remove_health_check(mock_entry) - - fake_listener.stop.assert_called_once_with() - self.mock_tg.timer_done.assert_not_called() - self.mock_tg.thread_done.assert_called_once_with(fake_listener) - self.assertIsNone(mock_entry.listener) diff --git a/senlin/tests/unit/engine/test_node.py b/senlin/tests/unit/engine/test_node.py deleted file mode 100644 index 4cc752b14..000000000 --- a/senlin/tests/unit/engine/test_node.py +++ /dev/null @@ -1,1217 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
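The _wait_for_action tests removed above all assert a single contract: poll 'action_get' until the action reaches SUCCEEDED, FAILED, or CANCELLED, or until a timeout elapses, and report the outcome as a (result, reason) tuple. As a reference for readers who no longer have the deleted module, here is a minimal standalone sketch of that polling contract; the function name, RPC call shape, and poll interval are illustrative assumptions, not Senlin's actual implementation:

    # Minimal sketch of the poll-until-done pattern exercised by the
    # removed _wait_for_action() tests.  Names and timing are assumed.
    import time

    ACTION_SUCCEEDED = 'SUCCEEDED'
    ACTION_FAILED = 'FAILED'
    ACTION_CANCELLED = 'CANCELLED'

    def wait_for_action(rpc, ctx, action_id, timeout, poll_interval=1.0):
        """Poll an action's status until it finishes or timeout expires.

        Returns (True, '') on success and (False, reason) otherwise,
        mirroring the (res, err) tuples asserted in the deleted tests.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            # The real code passes a request object; a bare id is used
            # here only to keep the sketch self-contained.
            action = rpc.call(ctx, 'action_get', action_id)
            status = action['status']
            if status == ACTION_SUCCEEDED:
                return True, ''
            if status in (ACTION_FAILED, ACTION_CANCELLED):
                return False, 'Cluster check action failed or cancelled'
            time.sleep(poll_interval)
        return False, 'Timeout while polling action %s' % action_id

Driving the sketch with a stub whose call() returns {'status': 'SUCCEEDED'} yields (True, ''), matching the happy-path assertion in the removed tests, while a stub that keeps returning a RUNNING status falls through to the timeout branch that the tests match with a case-insensitive search for "timeout".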
- -from unittest import mock - -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.common import exception -from senlin.engine import node as nodem -from senlin.objects import node as node_obj -from senlin.profiles import base as pb -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -PROFILE_ID = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d' -CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de' -NODE_ID = '60efdaa1-06c2-4fcf-ae44-17a2d85ff3ea' - - -class TestNode(base.SenlinTestCase): - - def setUp(self): - super(TestNode, self).setUp() - self.context = utils.dummy_context(project='node_test_project') - self.profile = utils.create_profile(self.context, PROFILE_ID) - self.cluster = utils.create_cluster(self.context, CLUSTER_ID, - PROFILE_ID) - - def test_node_init(self): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, role='first_node') - self.assertIsNone(node.id) - self.assertEqual('node1', node.name) - self.assertIsNone(node.physical_id) - self.assertEqual(PROFILE_ID, node.profile_id) - self.assertEqual('', node.user) - self.assertEqual('', node.project) - self.assertEqual('', node.domain) - self.assertEqual(CLUSTER_ID, node.cluster_id) - self.assertEqual(-1, node.index) - self.assertEqual('first_node', node.role) - - self.assertIsNone(node.init_at) - self.assertIsNone(node.created_at) - self.assertIsNone(node.updated_at) - - self.assertEqual('INIT', node.status) - self.assertEqual('Initializing', node.status_reason) - self.assertEqual({}, node.data) - self.assertEqual({}, node.metadata) - self.assertEqual({}, node.rt) - - def test_node_init_random_name(self): - node = nodem.Node(None, PROFILE_ID, None) - self.assertIsNotNone(node.name) - self.assertEqual(13, len(node.name)) - - def test_node_store_init(self): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context, - role='first_node', index=1) - self.assertIsNone(node.id) - node_id = node.store(self.context) - self.assertIsNotNone(node_id) - - node_info = node_obj.Node.get(self.context, node_id) - self.assertIsNotNone(node_info) - self.assertEqual('node1', node_info.name) - self.assertIsNone(node_info.physical_id) - self.assertEqual(CLUSTER_ID, node_info.cluster_id) - self.assertEqual(PROFILE_ID, node_info.profile_id) - self.assertEqual(self.context.user_id, node_info.user) - self.assertEqual(self.context.project_id, node_info.project) - self.assertEqual(self.context.domain_id, node_info.domain) - self.assertEqual(1, node_info.index) - self.assertEqual('first_node', node.role) - - self.assertIsNotNone(node_info.init_at) - self.assertIsNone(node_info.created_at) - self.assertIsNone(node_info.updated_at) - - self.assertEqual('INIT', node_info.status) - self.assertEqual('Initializing', node_info.status_reason) - self.assertEqual({}, node_info.metadata) - self.assertEqual({}, node_info.data) - - def test_node_store_update(self): - node = nodem.Node('node1', PROFILE_ID, "", user=self.context.user_id, - project=self.context.project_id) - node_id = node.store(self.context) - - node.name = 'new_name' - new_node_id = node.store(self.context) - - self.assertEqual(node_id, new_node_id) - - def test_node_load(self): - ex = self.assertRaises(exception.ResourceNotFound, - nodem.Node.load, - self.context, 'non-existent', None) - self.assertEqual("The node 'non-existent' could not be found.", - str(ex)) - - x_node_id = 'ee96c490-2dee-40c8-8919-4c64b89e326c' - node = utils.create_node(self.context, x_node_id, PROFILE_ID, - CLUSTER_ID) - 
node_info = nodem.Node.load(self.context, x_node_id) - - self.assertEqual(node.id, node_info.id) - self.assertEqual(node.name, node_info.name) - self.assertEqual(node.physical_id, node_info.physical_id) - self.assertEqual(node.cluster_id, node_info.cluster_id) - self.assertEqual(node.profile_id, node_info.profile_id) - self.assertEqual(node.user, node_info.user) - self.assertEqual(node.project, node_info.project) - self.assertEqual(node.domain, node_info.domain) - self.assertEqual(node.index, node_info.index) - self.assertEqual(node.role, node_info.role) - - self.assertEqual(node.init_at, node_info.init_at) - self.assertEqual(node.created_at, node_info.created_at) - self.assertEqual(node.updated_at, node_info.updated_at) - - self.assertEqual(node.status, node_info.status) - self.assertEqual(node.status_reason, node_info.status_reason) - self.assertEqual(node.metadata, node_info.metadata) - self.assertEqual(node.data, node_info.data) - self.assertEqual(self.profile.name, node_info.rt['profile'].name) - - def test_node_load_diff_project(self): - x_node_id = 'c06840c5-f4e4-49ae-8143-9da5b4c73f38' - utils.create_node(self.context, x_node_id, PROFILE_ID, CLUSTER_ID) - - new_ctx = utils.dummy_context(project='a-different-project') - ex = self.assertRaises(exception.ResourceNotFound, - nodem.Node.load, - new_ctx, x_node_id, None) - self.assertEqual("The node '%s' could not be found." % x_node_id, - str(ex)) - - res = nodem.Node.load(new_ctx, x_node_id, project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(x_node_id, res.id) - - @mock.patch.object(nodem.Node, '_from_object') - @mock.patch.object(node_obj.Node, 'get_all') - def test_node_load_all(self, mock_get, mock_init): - x_obj_1 = mock.Mock() - x_obj_2 = mock.Mock() - mock_get.return_value = [x_obj_1, x_obj_2] - - x_node_1 = mock.Mock() - x_node_2 = mock.Mock() - mock_init.side_effect = [x_node_1, x_node_2] - - result = nodem.Node.load_all(self.context) - - self.assertEqual([x_node_1, x_node_2], [n for n in result]) - mock_get.assert_called_once_with(self.context, cluster_id=None, - limit=None, marker=None, - sort=None, filters=None, - project_safe=True) - mock_init.assert_has_calls([ - mock.call(self.context, x_obj_1), - mock.call(self.context, x_obj_2)]) - - def test_node_set_status(self): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - node.store(self.context) - self.assertEqual(nodem.consts.NS_INIT, node.status) - self.assertIsNotNone(node.init_at) - self.assertIsNone(node.created_at) - self.assertIsNone(node.updated_at) - - # create - node.set_status(self.context, consts.NS_CREATING, - reason='Creation in progress') - self.assertEqual('CREATING', node.status) - self.assertEqual('Creation in progress', node.status_reason) - self.assertIsNone(node.created_at) - self.assertIsNone(node.updated_at) - - node.set_status(self.context, consts.NS_ACTIVE, - reason='Creation succeeded') - self.assertEqual('ACTIVE', node.status) - self.assertEqual('Creation succeeded', node.status_reason) - self.assertIsNotNone(node.created_at) - self.assertIsNotNone(node.updated_at) - - # update - node.set_status(self.context, consts.NS_UPDATING, - reason='Update in progress') - self.assertEqual('UPDATING', node.status) - self.assertEqual('Update in progress', node.status_reason) - self.assertIsNotNone(node.created_at) - - node.set_status(self.context, consts.NS_ACTIVE, - reason='Update succeeded') - self.assertEqual('ACTIVE', node.status) - self.assertEqual('Update succeeded', node.status_reason) - 
self.assertIsNotNone(node.created_at) - self.assertIsNotNone(node.updated_at) - - node.set_status(self.context, consts.NS_ACTIVE) - self.assertEqual('ACTIVE', node.status) - self.assertIsNotNone(node.created_at) - self.assertIsNotNone(node.updated_at) - - # delete - node.set_status(self.context, consts.NS_DELETING, - reason='Deletion in progress') - self.assertEqual('DELETING', node.status) - self.assertEqual('Deletion in progress', node.status_reason) - self.assertIsNotNone(node.created_at) - - @mock.patch.object(pb.Profile, 'get_details') - def test_node_get_details(self, mock_details): - node = nodem.Node('node1', CLUSTER_ID, None) - for physical_id in (None, ''): - node.physical_id = physical_id - self.assertEqual({}, node.get_details(self.context)) - self.assertEqual(0, mock_details.call_count) - - fake_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - node.physical_id = fake_id - mock_details.return_value = {'foo': 'bar'} - res = node.get_details(self.context) - mock_details.assert_called_once_with(self.context, node) - self.assertEqual({'foo': 'bar'}, res) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'create_object') - def test_node_create(self, mock_create, mock_status): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - mock_create.return_value = physical_id - res = node.do_create(self.context) - self.assertTrue(res) - mock_status.assert_any_call(self.context, consts.NS_CREATING, - 'Creation in progress') - mock_status.assert_any_call(self.context, consts.NS_ACTIVE, - 'Creation succeeded', - physical_id=physical_id) - - @mock.patch.object(nodem.Node, 'set_status') - def test_node_create_not_init(self, mock_status): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - node.status = 'NOT_INIT' - res, reason = node.do_create(self.context) - self.assertFalse(res) - self.assertEqual('Node must be in INIT status', reason) - mock_status.assert_any_call(self.context, consts.NS_ERROR, - 'Node must be in INIT status') - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'create_object') - def test_node_create_not_created(self, mock_create, mock_status): - - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - mock_create.side_effect = exception.EResourceCreation( - type='PROFILE', message='Boom', resource_id='test_id') - - res, reason = node.do_create(self.context) - - self.assertFalse(res) - self.assertEqual(str(reason), 'Failed in creating PROFILE: Boom.') - mock_status.assert_any_call(self.context, consts.NS_CREATING, - 'Creation in progress') - mock_status.assert_any_call(self.context, consts.NS_ERROR, - 'Failed in creating PROFILE: Boom.', - physical_id='test_id') - - @mock.patch.object(node_obj.Node, 'delete') - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'delete_object') - def test_node_delete(self, mock_delete, mock_status, mock_db_delete): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - node.physical_id = uuidutils.generate_uuid() - node.id = uuidutils.generate_uuid() - mock_db_delete.return_value = True - - res = node.do_delete(self.context) - - self.assertTrue(res) - mock_delete.assert_called_once_with(self.context, node) - mock_db_delete.assert_called_once_with(self.context, node.id) - mock_status.assert_called_once_with(self.context, consts.NS_DELETING, - 'Deletion in progress') - - @mock.patch.object(node_obj.Node, 'delete') - @mock.patch.object(nodem.Node, 
'set_status') - @mock.patch.object(pb.Profile, 'delete_object') - def test_node_delete_no_physical_id(self, mock_delete, mock_status, - mock_db_delete): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - node.id = uuidutils.generate_uuid() - self.assertIsNone(node.physical_id) - mock_db_delete.return_value = True - - res = node.do_delete(self.context) - - self.assertTrue(res) - mock_status.assert_called_once_with(self.context, consts.NS_DELETING, - "Deletion in progress") - self.assertTrue(mock_delete.called) - mock_db_delete.assert_called_once_with(self.context, node.id) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'delete_object') - def test_node_delete_EResourceDeletion(self, mock_delete, mock_status): - ex = exception.EResourceDeletion(type='PROFILE', id='NODE_ID', - message='Too Bad') - mock_delete.side_effect = ex - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - node.physical_id = uuidutils.generate_uuid() - - res = node.do_delete(self.context) - - self.assertFalse(res) - mock_delete.assert_called_once_with(self.context, node) - mock_status.assert_has_calls([ - mock.call(self.context, consts.NS_DELETING, - "Deletion in progress"), - mock.call(self.context, consts.NS_ERROR, - "Failed in deleting PROFILE 'NODE_ID': Too Bad.") - ]) - - @mock.patch.object(node_obj.Node, 'update') - @mock.patch.object(pb.Profile, 'update_object') - def test_node_update_new_profile(self, mock_update, mock_db): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context, - physical_id=uuidutils.generate_uuid()) - node.id = node.store(self.context) - new_id = uuidutils.generate_uuid() - utils.create_profile(self.context, new_id) - mock_update.return_value = True - - res = node.do_update(self.context, {'new_profile_id': new_id}) - - self.assertTrue(res) - mock_update.assert_called_once_with(self.context, node, new_id) - self.assertEqual(new_id, node.profile_id) - self.assertEqual(new_id, node.rt['profile'].id) - mock_db.assert_has_calls([ - mock.call(self.context, node.id, - {'status': consts.NS_UPDATING, - 'status_reason': 'Update in progress'}), - mock.call(self.context, node.id, - {'status': consts.NS_ACTIVE, - 'status_reason': 'Update succeeded', - 'profile_id': new_id, - 'updated_at': mock.ANY}) - ]) - - @mock.patch.object(pb.Profile, 'update_object') - @mock.patch.object(node_obj.Node, 'update') - def test_node_update_name(self, mock_db, mock_update): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - node.store(self.context) - - physical_id = uuidutils.generate_uuid() - node.physical_id = physical_id - - res = node.do_update(self.context, {'name': 'new_name', - 'role': 'new_role', - 'metadata': {'k': {'m': 'v'}}, - 'bogus': 'foo'}) - - self.assertTrue(res) - self.assertEqual('new_name', node.name) - mock_db.assert_has_calls([ - mock.call(self.context, node.id, - {'status': 'UPDATING', - 'status_reason': 'Update in progress'}), - mock.call(self.context, node.id, - {'status': 'ACTIVE', - 'status_reason': 'Update succeeded', - 'name': 'new_name', - 'role': 'new_role', - 'metadata': {'k': {'m': 'v'}}, - 'updated_at': mock.ANY}) - ]) - self.assertEqual(0, mock_update.call_count) - - def test_node_update_not_created(self): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - self.assertIsNone(node.physical_id) - new_profile_id = '71d8f4dd-1ef9-4308-b7ae-03298b04449e' - res = node.do_update(self.context, new_profile_id) - self.assertFalse(res) - - @mock.patch.object(pb.Profile, 'update_object') - 
@mock.patch.object(node_obj.Node, 'update') - def test_node_update_EResourceUpdate(self, mock_db, mock_update): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - node.physical_id = uuidutils.generate_uuid() - node.id = uuidutils.generate_uuid() - - ex = exception.EResourceUpdate(type='PROFILE', id='ID', - message='reason') - mock_update.side_effect = ex - new_id = uuidutils.generate_uuid() - utils.create_profile(self.context, new_id) - - res = node.do_update(self.context, {'new_profile_id': new_id}) - - self.assertFalse(res) - self.assertNotEqual(new_id, node.profile_id) - mock_db.assert_has_calls([ - mock.call( - self.context, node.id, - {"status": "UPDATING", "status_reason": "Update in progress"} - ), - mock.call( - self.context, node.id, - {"status": "ERROR", - "status_reason": "Failed in updating PROFILE 'ID': reason.", - "updated_at": mock.ANY} - ) - ]) - self.assertEqual(1, mock_update.call_count) - - @mock.patch.object(node_obj.Node, 'migrate') - def test_node_join_same_cluster(self, mock_migrate): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - node.index = 1 - res = node.do_join(self.context, CLUSTER_ID) - self.assertTrue(res) - self.assertEqual(1, node.index) - self.assertIsNone(node.updated_at) - self.assertFalse(mock_migrate.called) - - @mock.patch.object(pb.Profile, 'join_cluster') - @mock.patch.object(node_obj.Node, 'migrate') - def test_node_join(self, mock_migrate, mock_join_cluster): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - mock_join_cluster.return_value = True - cluster_id = 'fb8bca7a-a82b-4442-a40f-92d3e3cfb0b9' - - res = node.do_join(self.context, cluster_id) - - self.assertTrue(res) - mock_migrate.assert_called_once_with(self.context, node.id, - cluster_id, mock.ANY) - mock_join_cluster.assert_called_once_with(self.context, node, - cluster_id) - self.assertEqual(cluster_id, node.cluster_id) - self.assertEqual(mock_migrate.return_value.index, node.index) - self.assertIsNotNone(node.updated_at) - - @mock.patch.object(pb.Profile, 'join_cluster') - def test_node_join_fail_profile_call(self, mock_join): - node = nodem.Node('node1', PROFILE_ID, None, self.context) - node.id = uuidutils.generate_uuid() - mock_join.return_value = False - cluster_id = 'fb8bca7a-a82b-4442-a40f-92d3e3cfb0b9' - - res = node.do_join(self.context, cluster_id) - - self.assertFalse(res) - mock_join.assert_called_once_with(self.context, node, cluster_id) - self.assertEqual(-1, node.index) - - @mock.patch.object(node_obj.Node, 'migrate') - def test_node_leave_no_cluster(self, mock_migrate): - node = nodem.Node('node1', PROFILE_ID, '', self.context) - self.assertTrue(node.do_leave(self.context)) - self.assertFalse(mock_migrate.called) - self.assertEqual('', node.cluster_id) - self.assertIsNone(node.updated_at) - - @mock.patch.object(pb.Profile, 'leave_cluster') - @mock.patch.object(node_obj.Node, 'migrate') - def test_node_leave(self, mock_migrate, mock_leave_cluster): - node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) - mock_leave_cluster.return_value = True - res = node.do_leave(self.context) - self.assertTrue(res) - self.assertEqual('', node.cluster_id) - self.assertIsNotNone(node.updated_at) - self.assertEqual(-1, node.index) - mock_migrate.assert_called_once_with(self.context, node.id, - None, mock.ANY) - mock_leave_cluster.assert_called_once_with(self.context, node) - - @mock.patch.object(pb.Profile, 'leave_cluster') - def test_node_leave_fail_update_server_metadata(self, mock_leave): - node = nodem.Node('node1', 
PROFILE_ID, CLUSTER_ID, self.context, - index=1) - mock_leave.return_value = False - res = node.do_leave(self.context) - self.assertFalse(res) - self.assertNotEqual('', node.cluster_id) - self.assertIsNone(node.updated_at) - self.assertEqual(1, node.index) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'check_object') - def test_node_check(self, mock_check, mock_status): - node = nodem.Node('node1', PROFILE_ID, '') - node.status = consts.NS_ACTIVE - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - mock_check.return_value = True - res = node.do_check(self.context) - - self.assertTrue(res) - mock_check.assert_called_once_with(self.context, node) - mock_status.assert_called_once_with(self.context, consts.NS_ACTIVE, - 'Check: Node is ACTIVE.') - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'check_object') - def test_node_check_warning(self, mock_check, mock_status): - node = nodem.Node('node1', PROFILE_ID, '') - node.status = consts.NS_WARNING - node.status_reason = 'bad news' - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - mock_check.return_value = True - - res = node.do_check(self.context) - - self.assertTrue(res) - mock_check.assert_called_once_with(self.context, node) - msg = ("Check: Physical object is ACTIVE but the node status was " - "WARNING. %s") % node.status_reason - mock_status.assert_called_once_with(self.context, consts.NS_WARNING, - msg) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'check_object') - def test_node_check_not_active(self, mock_check, mock_status): - node = nodem.Node('node1', PROFILE_ID, '') - node.status = consts.NS_WARNING - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - mock_check.return_value = False - - res = node.do_check(self.context) - - self.assertTrue(res) - mock_status.assert_called_once_with(self.context, consts.NS_ERROR, - 'Check: Node is not ACTIVE.') - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'check_object') - def test_node_check_check_with_exc(self, mock_check, mock_status): - node = nodem.Node('node1', PROFILE_ID, '') - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - err = exception.EResourceOperation(op='checking', type='server', - id=node.physical_id, - message='failed get') - mock_check.side_effect = err - - res = node.do_check(self.context) - - self.assertFalse(res) - mock_status.assert_called_once_with( - self.context, - consts.NS_ERROR, - "Failed in checking server '%s': failed get." % node.physical_id) - - def test_node_check_no_physical_id(self): - node = nodem.Node('node1', PROFILE_ID, '') - - res = node.do_check(self.context) - - self.assertFalse(res) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'check_object') - def test_node_check_no_server(self, mock_check, mock_status): - node = nodem.Node('node1', PROFILE_ID, '') - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - - err = exception.EServerNotFound(type='server', - id=node.physical_id, - message='No Server found') - mock_check.side_effect = err - - res = node.do_check(self.context) - - self.assertTrue(res) - mock_status.assert_called_once_with( - self.context, consts.NS_ERROR, - "Failed in found server '%s': No Server found." 
- % node.physical_id, - physical_id=None) - - @mock.patch.object(pb.Profile, 'healthcheck_object') - def test_node_healthcheck(self, mock_healthcheck): - node = nodem.Node('node1', PROFILE_ID, '') - node.status = consts.NS_ACTIVE - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - mock_healthcheck.return_value = True - res = node.do_healthcheck(self.context, consts.NODE_STATUS_POLLING) - - self.assertTrue(res) - mock_healthcheck.assert_called_once_with(self.context, node, - consts.NODE_STATUS_POLLING) - - def test_node_healthcheck_no_physical_id(self): - node = nodem.Node('node1', PROFILE_ID, '') - - res = node.do_healthcheck(self.context, consts.NODE_STATUS_POLLING) - - self.assertFalse(res) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_new_object(self, mock_recover, mock_status): - def set_status(*args, **kwargs): - if args[1] == 'ACTIVE': - node.physical_id = new_id - node.data = {'recovery': 'RECREATE'} - - node = nodem.Node('node1', PROFILE_ID, '') - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a' - - # action = node_action.NodeAction(node.id, 'ACTION', self.ctx) - - mock_recover.return_value = new_id, True - mock_status.side_effect = set_status - action = mock.Mock() - action.inputs = {'operation': 'SWIM'} - - res = node.do_recover(self.context, action) - - self.assertTrue(res) - mock_recover.assert_called_once_with( - self.context, node, **action.inputs) - self.assertEqual('node1', node.name) - self.assertEqual(new_id, node.physical_id) - self.assertEqual(PROFILE_ID, node.profile_id) - self.assertEqual({'recovery': 'RECREATE'}, node.data) - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ACTIVE, - reason='Recovery succeeded', - physical_id=new_id, - data={'recovery': 'RECREATE'})]) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_in_place(self, mock_recover, mock_status): - node = nodem.Node('node1', PROFILE_ID, None) - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - mock_recover.return_value = node.physical_id, True - action = mock.Mock(inputs={}) - - res = node.do_recover(self.context, action) - - self.assertTrue(res) - mock_recover.assert_called_once_with(self.context, node) - self.assertEqual('node1', node.name) - self.assertEqual(PROFILE_ID, node.profile_id) - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ACTIVE, - reason='Recovery succeeded')]) - - @mock.patch.object(nodem.Node, 'set_status') - def test_node_recover_check_active(self, mock_status): - node = nodem.Node('node1', PROFILE_ID, None) - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - node.status = 'ACTIVE' - mock_check = self.patchobject(pb.Profile, 'check_object') - mock_check.return_value = True - action = mock.Mock(inputs={'check': True}) - - res = node.do_recover(self.context, action) - - self.assertTrue(res) - mock_check.assert_called_once_with(self.context, node) - mock_status.assert_called_once_with(self.context, consts.NS_ACTIVE, - reason='Recover: Node is ACTIVE.') - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_check_error(self, mock_recover, mock_status): - def set_status(*args, **kwargs): - if args[1] == 
'ACTIVE': - node.physical_id = new_id - node.data = {'recovery': 'recreate'} - - node = nodem.Node('node1', PROFILE_ID, '') - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a' - mock_recover.return_value = new_id, True - mock_status.side_effect = set_status - mock_check = self.patchobject(pb.Profile, 'check_object') - mock_check.return_value = False - action = mock.Mock(inputs={'check': True}) - - res = node.do_recover(self.context, action) - - self.assertTrue(res) - mock_check.assert_called_once_with(self.context, node) - mock_recover.assert_called_once_with( - self.context, node, **action.inputs) - self.assertEqual('node1', node.name) - self.assertEqual(new_id, node.physical_id) - self.assertEqual(PROFILE_ID, node.profile_id) - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ACTIVE, - reason='Recovery succeeded', - physical_id=new_id, - data={'recovery': 'RECREATE'})]) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_recreate(self, mock_recover, mock_status): - def set_status(*args, **kwargs): - if args[1] == 'ACTIVE': - node.physical_id = new_id - node.data = {'recovery': 'RECREATE'} - - node = nodem.Node('node1', PROFILE_ID, '', id='fake') - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a' - mock_recover.return_value = new_id, True - mock_status.side_effect = set_status - mock_check = self.patchobject(pb.Profile, 'check_object') - mock_check.return_value = False - action = mock.Mock( - outputs={}, inputs={'operation': 'RECREATE', - 'check': True}) - - res = node.do_recover(self.context, action) - - self.assertTrue(res) - mock_check.assert_called_once_with(self.context, node) - mock_recover.assert_called_once_with( - self.context, node, **action.inputs) - self.assertEqual('node1', node.name) - self.assertEqual(new_id, node.physical_id) - self.assertEqual(PROFILE_ID, node.profile_id) - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ACTIVE, - reason='Recovery succeeded', - physical_id=new_id, - data={'recovery': 'RECREATE'})]) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_check_exception(self, mock_recover, mock_status): - def set_status(*args, **kwargs): - if args[1] == 'ACTIVE': - node.physical_id = new_id - node.data = {'recovery': 'RECREATE'} - - node = nodem.Node('node1', PROFILE_ID, '') - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a' - mock_recover.return_value = new_id, True - mock_status.side_effect = set_status - mock_check = self.patchobject(pb.Profile, 'check_object') - mock_check.side_effect = exception.EResourceOperation( - op='checking', - type='server', - id=node.physical_id, - reason='Boom!' 
- ) - action = mock.Mock(inputs={'operation': 'boom', - 'check': True}) - - res = node.do_recover(self.context, action) - - self.assertTrue(res) - mock_check.assert_called_once_with(self.context, node) - mock_recover.assert_called_once_with( - self.context, node, **action.inputs) - self.assertEqual('node1', node.name) - self.assertEqual(new_id, node.physical_id) - self.assertEqual(PROFILE_ID, node.profile_id) - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ACTIVE, - reason='Recovery succeeded', - physical_id=new_id, - data={'recovery': 'RECREATE'})]) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_failed_recover(self, mock_recover, mock_status): - node = nodem.Node('node1', PROFILE_ID, None) - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - mock_recover.return_value = node.physical_id, None - action = mock.Mock(inputs={'operation': 'RECREATE'}) - - res = node.do_recover(self.context, action) - - self.assertFalse(res) - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ERROR, - reason='Recovery failed')]) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_failed_recover_with_old_physical_id(self, - mock_recover, - mock_status): - node = nodem.Node('node1', PROFILE_ID, None) - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - - action = mock.Mock( - inputs={'operation': consts.RECOVER_RECREATE, 'check': True}) - mock_recover.side_effect = exception.EResourceOperation( - op=consts.RECOVER_RECREATE, - type='server', - id=node.physical_id, - resource_id=node.physical_id, - reason='Recovery failed', - ) - res = node.do_recover(self.context, action) - - self.assertFalse(res) - mock_recover.assert_called_once_with( - self.context, node, **action.inputs) - reason = ("Failed in RECREATE server 'd94d6333-82e6-4f87-b7ab-b786776d" - "f9d1': Internal error happened.") - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ERROR, - reason=str(reason), - physical_id=node.physical_id)]) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_failed_recover_with_new_physical_id(self, - mock_recover, - mock_status): - def set_status(*args, **kwargs): - if args[1] == consts.NS_ERROR: - node.physical_id = new_id - - node = nodem.Node('node1', PROFILE_ID, None) - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a' - - mock_status.side_effect = set_status - action = mock.Mock(inputs={'operation': consts.RECOVER_RECREATE, - 'check': True}) - mock_recover.side_effect = exception.EResourceOperation( - op=consts.RECOVER_RECREATE, - type='server', - id=node.physical_id, - resource_id=new_id, - reason='Recovery failed', - ) - res = node.do_recover(self.context, action) - - self.assertFalse(res) - mock_recover.assert_called_once_with( - self.context, node, **action.inputs) - reason = ("Failed in RECREATE server 'd94d6333-82e6-4f87-b7ab-b786776d" - "f9d1': Internal error happened.") - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ERROR, - reason=str(reason), - 
physical_id=new_id)]) - - def test_node_recover_no_physical_id_reboot_op(self): - node = nodem.Node('node1', PROFILE_ID, None) - action = mock.Mock(inputs={'operation': 'REBOOT'}) - - res = node.do_recover(self.context, action) - - self.assertFalse(res) - - def test_node_recover_no_physical_id_rebuild_op(self): - node = nodem.Node('node1', PROFILE_ID, None) - action = mock.Mock(inputs={'operation': 'REBUILD'}) - - res = node.do_recover(self.context, action) - - self.assertFalse(res) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_no_physical_id_no_op(self, mock_recover, - mock_status): - def set_status(*args, **kwargs): - if args[1] == 'ACTIVE': - node.physical_id = new_id - node.data = {'recovery': 'RECREATE'} - - node = nodem.Node('node1', PROFILE_ID, '', id='fake') - new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a' - mock_recover.return_value = new_id, True - mock_status.side_effect = set_status - mock_check = self.patchobject(pb.Profile, 'check_object') - mock_check.return_value = False - action = mock.Mock( - outputs={}, inputs={}) - - res = node.do_recover(self.context, action) - - self.assertTrue(res) - mock_check.assert_not_called() - mock_recover.assert_called_once_with( - self.context, node, **action.inputs) - self.assertEqual('node1', node.name) - self.assertEqual(new_id, node.physical_id) - self.assertEqual(PROFILE_ID, node.profile_id) - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ACTIVE, - reason='Recovery succeeded', - physical_id=new_id, - data={'recovery': 'RECREATE'})]) - - @mock.patch.object(nodem.Node, 'set_status') - @mock.patch.object(pb.Profile, 'recover_object') - def test_node_recover_no_physical_id_recreate_op(self, mock_recover, - mock_status): - def set_status(*args, **kwargs): - if args[1] == 'ACTIVE': - node.physical_id = new_id - node.data = {'recovery': 'RECREATE'} - - node = nodem.Node('node1', PROFILE_ID, '', id='fake') - new_id = '166db83b-b4a4-49ef-96a8-6c0fdd882d1a' - mock_recover.return_value = new_id, True - mock_status.side_effect = set_status - mock_check = self.patchobject(pb.Profile, 'check_object') - mock_check.return_value = False - action = mock.Mock( - outputs={}, inputs={'operation': 'RECREATE', - 'check': True}) - - res = node.do_recover(self.context, action) - - self.assertTrue(res) - mock_check.assert_called_once_with(self.context, node) - mock_recover.assert_called_once_with( - self.context, node, **action.inputs) - self.assertEqual('node1', node.name) - self.assertEqual(new_id, node.physical_id) - self.assertEqual(PROFILE_ID, node.profile_id) - mock_status.assert_has_calls([ - mock.call(self.context, 'RECOVERING', - reason='Recovery in progress'), - mock.call(self.context, consts.NS_ACTIVE, - reason='Recovery succeeded', - physical_id=new_id, - data={'recovery': 'RECREATE'})]) - - @mock.patch.object(nodem.Node, 'set_status') - def test_node_recover_operation_not_support(self, mock_set_status): - node = nodem.Node('node1', PROFILE_ID, None) - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - action = mock.Mock( - outputs={}, inputs={'operation': 'foo'}) - - res = node.do_recover(self.context, action) - self.assertEqual({}, action.outputs) - self.assertFalse(res) - - @mock.patch.object(nodem.Node, 'set_status') - def test_node_recover_operation_not_string(self, mock_set_status): - node = nodem.Node('node1', PROFILE_ID, None) - node.physical_id = 
'd94d6333-82e6-4f87-b7ab-b786776df9d1' - action = mock.Mock( - outputs={}, inputs={'operation': 'foo'}) - - res = node.do_recover(self.context, action) - self.assertEqual({}, action.outputs) - self.assertFalse(res) - - @mock.patch.object(nodem.Node, 'set_status') - def test_node_operation(self, mock_set_status): - node = nodem.Node('node1', PROFILE_ID, '') - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - x_profile = mock.Mock() - x_profile.handle_dance = mock.Mock(return_value=True) - node.rt['profile'] = x_profile - - inputs = {'operation': 'dance', 'params': {'style': 'tango'}} - res = node.do_operation(self.context, **inputs) - - self.assertTrue(res) - mock_set_status.assert_has_calls([ - mock.call(self.context, consts.NS_OPERATING, - reason="Operation 'dance' in progress"), - mock.call(self.context, consts.NS_ACTIVE, - reason="Operation 'dance' succeeded") - ]) - x_profile.handle_dance.assert_called_once_with(node, style='tango') - - def test_node_operation_no_physical_id(self): - node = nodem.Node('node1', PROFILE_ID, None) - inputs = {'operation': 'dance', 'params': {'style': 'tango'}} - - res = node.do_operation(self.context, **inputs) - - self.assertFalse(res) - - @mock.patch.object(nodem.Node, 'set_status') - def test_node_operation_failed_op(self, mock_set_status): - node = nodem.Node('node1', PROFILE_ID, '') - node.physical_id = 'd94d6333-82e6-4f87-b7ab-b786776df9d1' - x_profile = mock.Mock() - err = exception.EResourceOperation( - op='dance', type='container', id='test_id', message='Boom') - x_profile.handle_dance = mock.Mock(side_effect=err) - node.rt['profile'] = x_profile - - inputs = {'operation': 'dance', 'params': {'style': 'tango'}} - res = node.do_operation(self.context, **inputs) - - self.assertFalse(res) - mock_set_status.assert_has_calls([ - mock.call(self.context, consts.NS_OPERATING, - reason="Operation 'dance' in progress"), - mock.call(self.context, consts.NS_ERROR, - reason="Failed in dance container 'test_id': Boom.") - ]) - x_profile.handle_dance.assert_called_once_with(node, style='tango') - - def _verify_execution_create_args(self, expected_name, - expected_inputs_dict, wfc): - wfc.execution_create.assert_called_once_with(mock.ANY, mock.ANY) - actual_call_args, call_kwargs = wfc.execution_create.call_args - - # execution_create parameters are name and inputs - actual_call_name, actual_call_inputs = actual_call_args - - # actual_call_inputs is string representation of a dictionary. 
- # convert actual_call_inputs to json, then dump it back as string - # sorted by key - final_actual_call_inputs = jsonutils.dumps( - jsonutils.loads(actual_call_inputs), sort_keys=True) - - # dump expected_inputs_dict as string sorted by key - final_expected_inputs = jsonutils.dumps( - expected_inputs_dict, sort_keys=True) - - # compare the sorted input strings along with the names - self.assertEqual(actual_call_name, expected_name) - self.assertEqual(final_actual_call_inputs, final_expected_inputs) - - def test_run_workflow(self): - node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER') - node.physical_id = 'FAKE_NODE' - - wfc = mock.Mock() - wfc.workflow_find.return_value = None - wfc.workflow_create = mock.Mock() - wfc.execution_create = mock.Mock() - x_profile = mock.Mock() - x_profile.workflow = mock.Mock(return_value=wfc) - node.rt['profile'] = x_profile - - options = { - 'workflow_name': 'foo', - 'inputs': { - 'definition': { - 'bar': 'baz' - }, - 'FAKE_KEY1': 'FAKE_VALUE1', - 'FAKE_KEY2': 'FAKE_VALUE2', - } - } - - res = node.run_workflow(**options) - - self.assertTrue(res) - x_profile.workflow.assert_called_once_with(node) - wfc.workflow_find.assert_called_once_with('foo') - wfc.workflow_create.assert_called_once_with( - {'bar': 'baz'}, scope='private') - - final_dict = { - 'cluster_id': 'FAKE_CLUSTER', - 'node_id': 'FAKE_NODE', - 'FAKE_KEY1': 'FAKE_VALUE1', - 'FAKE_KEY2': 'FAKE_VALUE2', - } - self._verify_execution_create_args('foo', final_dict, wfc) - - def test_run_workflow_no_physical_id(self): - node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER') - node.physical_id = None - - res = node.run_workflow() - - self.assertFalse(res) - - def test_run_workflow_workflow_is_found(self): - node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER') - node.physical_id = 'FAKE_NODE' - - wfc = mock.Mock() - wfc.workflow_find.return_value = mock.Mock(definition={'bar': 'baz'}) - wfc.workflow_create = mock.Mock() - wfc.execution_create = mock.Mock() - x_profile = mock.Mock() - x_profile.workflow = mock.Mock(return_value=wfc) - node.rt['profile'] = x_profile - - options = { - 'workflow_name': 'foo', - 'inputs': { - 'FAKE_KEY1': 'FAKE_VALUE1', - 'FAKE_KEY2': 'FAKE_VALUE2', - } - } - - res = node.run_workflow(**options) - - self.assertTrue(res) - x_profile.workflow.assert_called_once_with(node) - wfc.workflow_find.assert_called_once_with('foo') - self.assertEqual(0, wfc.workflow_create.call_count) - final_dict = { - 'cluster_id': 'FAKE_CLUSTER', - 'node_id': 'FAKE_NODE', - 'FAKE_KEY1': 'FAKE_VALUE1', - 'FAKE_KEY2': 'FAKE_VALUE2', - } - self._verify_execution_create_args('foo', final_dict, wfc) - - def test_run_workflow_failed_creation(self): - node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER') - node.physical_id = 'FAKE_NODE' - - wfc = mock.Mock() - wfc.workflow_find.return_value = None - err = exception.InternalError(message='boom') - wfc.workflow_create.side_effect = err - wfc.execution_create = mock.Mock() - x_profile = mock.Mock() - x_profile.workflow = mock.Mock(return_value=wfc) - node.rt['profile'] = x_profile - - options = { - 'workflow_name': 'foo', - 'inputs': { - 'definition': {'bar': 'baz'}, - 'FAKE_KEY1': 'FAKE_VALUE1', - 'FAKE_KEY2': 'FAKE_VALUE2', - } - } - - ex = self.assertRaises(exception.EResourceOperation, - node.run_workflow, - **options) - - self.assertEqual("Failed in executing workflow 'foo': boom.", - str(ex)) - x_profile.workflow.assert_called_once_with(node) - wfc.workflow_find.assert_called_once_with('foo') - wfc.workflow_create.assert_called_once_with( - {'bar': 
'baz'}, scope='private') - self.assertEqual(0, wfc.execution_create.call_count) - - def test_run_workflow_failed_execution(self): - node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER') - node.physical_id = 'FAKE_NODE' - - wfc = mock.Mock() - wfc.workflow_find.return_value = None - wfc.workflow_create = mock.Mock() - err = exception.InternalError(message='boom') - wfc.execution_create.side_effect = err - x_profile = mock.Mock() - x_profile.workflow = mock.Mock(return_value=wfc) - node.rt['profile'] = x_profile - - options = { - 'workflow_name': 'foo', - 'inputs': { - 'definition': {'bar': 'baz'}, - 'FAKE_KEY1': 'FAKE_VALUE1', - 'FAKE_KEY2': 'FAKE_VALUE2', - } - } - - ex = self.assertRaises(exception.EResourceOperation, - node.run_workflow, - **options) - - self.assertEqual("Failed in executing workflow 'foo': boom.", - str(ex)) - x_profile.workflow.assert_called_once_with(node) - wfc.workflow_find.assert_called_once_with('foo') - wfc.workflow_create.assert_called_once_with( - {'bar': 'baz'}, scope='private') - final_dict = { - 'cluster_id': 'FAKE_CLUSTER', - 'node_id': 'FAKE_NODE', - 'FAKE_KEY1': 'FAKE_VALUE1', - 'FAKE_KEY2': 'FAKE_VALUE2', - } - self._verify_execution_create_args('foo', final_dict, wfc) diff --git a/senlin/tests/unit/engine/test_registry.py b/senlin/tests/unit/engine/test_registry.py deleted file mode 100644 index 616ea3a26..000000000 --- a/senlin/tests/unit/engine/test_registry.py +++ /dev/null @@ -1,221 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
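[Editorial sketch] The run_workflow tests above pin down a find-or-create pattern: look the workflow up by name through the client the profile returns, create it from the supplied definition (with private scope) only when the lookup is empty, then create an execution whose inputs are augmented with cluster_id and node_id and serialized to a string. A minimal sketch of that flow, with wfc standing in for the workflow client and every name illustrative rather than Senlin's actual API:

    import json

    def run_workflow(wfc, workflow_name, inputs, cluster_id, node_id):
        inputs = dict(inputs)
        definition = inputs.pop('definition', None)

        # Reuse an existing workflow; create one from the supplied
        # definition only when the lookup comes back empty.
        if wfc.workflow_find(workflow_name) is None:
            wfc.workflow_create(definition, scope='private')

        # Execution inputs always carry the cluster and node identity,
        # serialized as a JSON string.
        inputs.update({'cluster_id': cluster_id, 'node_id': node_id})
        wfc.execution_create(workflow_name, json.dumps(inputs))
        return True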
- -from unittest import mock - -from senlin.engine import registry -from senlin.tests.unit.common import base - - -class PluginInfoTest(base.SenlinTestCase): - - def setUp(self): - super(PluginInfoTest, self).setUp() - self.reg = registry.Registry('SENLIN', None) - - def test_create(self): - plugin = mock.Mock() - pi = registry.PluginInfo(self.reg, 'FOO', plugin) - - self.assertIsInstance(pi, registry.PluginInfo) - self.assertEqual(self.reg, pi.registry) - self.assertEqual('FOO', pi.name) - self.assertEqual(plugin, pi.plugin) - self.assertTrue(pi.user_provided) - - def test_eq_ne(self): - plugin1 = mock.Mock() - plugin2 = mock.Mock() - pi1 = registry.PluginInfo(self.reg, 'FOO', plugin1) - pi2 = registry.PluginInfo(self.reg, 'FOO', plugin1) - pi3 = registry.PluginInfo(self.reg, 'BAR', plugin1) - pi4 = registry.PluginInfo(self.reg, 'FOO', plugin2) - - self.assertIsNotNone(pi1) - self.assertEqual(pi1, pi2) - self.assertNotEqual(pi1, pi3) - self.assertNotEqual(pi1, pi4) - self.assertNotEqual(pi2, pi4) - self.assertNotEqual(pi3, pi4) - self.assertEqual(pi1, pi2) - self.assertNotEqual(pi1, pi3) - self.assertNotEqual(pi1, pi4) - self.assertNotEqual(pi2, pi4) - self.assertNotEqual(pi3, pi4) - self.assertIsNotNone(pi1) - - def test_ordering(self): - plugin1 = mock.Mock() - plugin2 = mock.Mock() - pi1 = registry.PluginInfo(self.reg, 'FOO', plugin1) - pi2 = registry.PluginInfo(self.reg, 'FOO', plugin1) - pi2.user_provided = False - self.assertLess(pi1, pi2) - - pi3 = registry.PluginInfo(self.reg, 'FOO_LONG', plugin1) - self.assertLess(pi3, pi1) - - pi4 = registry.PluginInfo(self.reg, 'BAR', plugin2) - self.assertLess(pi4, pi1) - self.assertNotEqual(pi4, pi1) - - def test_str(self): - plugin = mock.Mock() - pi = registry.PluginInfo(self.reg, 'FOO', plugin) - expected = '[Plugin](User:True) FOO -> %s' % str(plugin) - self.assertEqual(expected, str(pi)) - - -class RegistryTest(base.SenlinTestCase): - - def test_create(self): - reg = registry.Registry('SENLIN', None) - self.assertEqual('SENLIN', reg.registry_name) - self.assertEqual({}, reg._registry) - self.assertTrue(reg.is_global) - self.assertIsNone(reg.global_registry) - - reg_sub = registry.Registry('SUB', reg) - self.assertEqual('SUB', reg_sub.registry_name) - self.assertEqual({}, reg_sub._registry) - self.assertFalse(reg_sub.is_global) - self.assertEqual(reg, reg_sub.global_registry) - - def test_register_info(self): - reg = registry.Registry('SENLIN', None) - plugin = mock.Mock() - pi = registry.PluginInfo(reg, 'FOO', plugin) - reg._register_info('FOO', pi) - - result = reg._registry.get('FOO') - self.assertEqual(pi, result) - - # register the same name and same PluginInfo, no new entry added - reg._register_info('FOO', pi) - self.assertEqual(1, len(reg._registry)) - - # register the same name with different PluginInfo -> replacement - new_pi = registry.PluginInfo(reg, 'FOO', plugin) - reg._register_info('FOO', new_pi) - self.assertEqual(1, len(reg._registry)) - - # additional check: this is a global registry - self.assertFalse(new_pi.user_provided) - - # removal - reg._register_info('FOO', None) - self.assertEqual(0, len(reg._registry)) - - def test_register_plugin(self): - reg = registry.Registry('SENLIN', None) - plugin = mock.Mock() - reg.register_plugin('FOO', plugin) - - pi = reg._registry.get('FOO') - self.assertIsInstance(pi, registry.PluginInfo) - self.assertEqual(plugin, pi.plugin) - self.assertEqual('FOO', pi.name) - - def test_load(self): - snippet = { - 'K2': 'Class2', - 'K4': 'Class4', - 'K5': 'Class5', - } - - reg = 
registry.Registry('SENLIN', None) - reg.load(snippet) - pi2 = reg._registry.get('K2') - self.assertIsInstance(pi2, registry.PluginInfo) - self.assertEqual('K2', pi2.name) - self.assertEqual('Class2', pi2.plugin) - pi4 = reg._registry.get('K4') - self.assertIsInstance(pi4, registry.PluginInfo) - self.assertEqual('K4', pi4.name) - self.assertEqual('Class4', pi4.plugin) - pi5 = reg._registry.get('K5') - self.assertIsInstance(pi5, registry.PluginInfo) - self.assertEqual('K5', pi5.name) - self.assertEqual('Class5', pi5.plugin) - - # load with None - snippet = { - 'K5': None - } - reg.load(snippet) - res = reg._registry.get('K5') - self.assertIsNone(res) - - def test_iterable_by(self): - reg = registry.Registry('GLOBAL', None) - plugin = mock.Mock() - reg.register_plugin('FOO', plugin) - - res = [r for r in reg.iterable_by('FOO')] - self.assertEqual(1, len(res)) - self.assertEqual('FOO', res[0].name) - - def test_get_plugin(self): - # Global registry - reg = registry.Registry('GLOBAL', None) - self.assertTrue(reg.is_global) - - # Register plugin in global - plugin = mock.Mock() - reg.register_plugin('FOO', plugin) - self.assertEqual(plugin, reg.get_plugin('FOO')) - - # Sub registry - sub = registry.Registry('SUB', reg) - self.assertFalse(sub.is_global) - - # Retrieve plugin from global registry - self.assertEqual(plugin, sub.get_plugin('FOO')) - - # Plugin in local registry overrides that in the global registry - plugin_new = mock.Mock() - sub.register_plugin('FOO', plugin_new) - self.assertEqual(plugin_new, sub.get_plugin('FOO')) - - def test_as_dict(self): - reg = registry.Registry('GLOBAL', None) - plugin1 = mock.Mock() - reg.register_plugin('FOO', plugin1) - plugin2 = mock.Mock() - reg.register_plugin('BAR', plugin2) - - res = reg.as_dict() - self.assertIsInstance(res, dict) - self.assertEqual(plugin1, res.get('FOO')) - self.assertEqual(plugin2, res.get('BAR')) - - def test_get_types(self): - reg = registry.Registry('GLOBAL', None) - plugin1 = mock.Mock(VERSIONS={'1.0': 'bar'}) - reg.register_plugin('FOO-1.0', plugin1) - plugin2 = mock.Mock(VERSIONS={'1.1': 'car'}) - reg.register_plugin('BAR-1.1', plugin2) - - self.assertIn( - { - 'name': 'FOO', - 'version': '1.0', - 'support_status': {'1.0': 'bar'} - }, - reg.get_types()) - self.assertIn( - { - 'name': 'BAR', - 'version': '1.1', - 'support_status': {'1.1': 'car'} - }, - reg.get_types()) diff --git a/senlin/tests/unit/engine/test_senlin_lock.py b/senlin/tests/unit/engine/test_senlin_lock.py deleted file mode 100644 index 5c945a5f9..000000000 --- a/senlin/tests/unit/engine/test_senlin_lock.py +++ /dev/null @@ -1,190 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
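[Editorial sketch] The Registry tests above verify a two-level lookup: a sub-registry consults its own map first and falls back to the global registry, so a locally registered plugin shadows a global plugin of the same name. A rough reduction of that behavior, without the PluginInfo bookkeeping of the real class:

    class SimpleRegistry:
        def __init__(self, name, global_registry=None):
            self.registry_name = name
            self._registry = {}
            self.is_global = global_registry is None
            self.global_registry = global_registry

        def register_plugin(self, name, plugin):
            self._registry[name] = plugin

        def get_plugin(self, name):
            # Local entries win; otherwise defer to the global registry.
            if name in self._registry:
                return self._registry[name]
            if self.global_registry is not None:
                return self.global_registry.get_plugin(name)
            return None

    # Usage mirroring test_get_plugin:
    glob = SimpleRegistry('GLOBAL')
    glob.register_plugin('FOO', 'global-plugin')
    sub = SimpleRegistry('SUB', glob)
    assert sub.get_plugin('FOO') == 'global-plugin'
    sub.register_plugin('FOO', 'local-plugin')
    assert sub.get_plugin('FOO') == 'local-plugin'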
- -from unittest import mock - -from senlin.common import utils as common_utils -from senlin.engine import senlin_lock as lockm -from senlin.objects import action as ao -from senlin.objects import cluster_lock as clo -from senlin.objects import node_lock as nlo -from senlin.objects import service as svco -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class SenlinLockTest(base.SenlinTestCase): - - def setUp(self): - super(SenlinLockTest, self).setUp() - - self.ctx = utils.dummy_context() - - ret = mock.Mock(owner='ENGINE', id='ACTION_ABC') - self.stub_get = self.patchobject(ao.Action, 'get', return_value=ret) - - @mock.patch.object(clo.ClusterLock, "acquire") - def test_cluster_lock_acquire_already_owner(self, mock_acquire): - mock_acquire.return_value = ['ACTION_XYZ'] - - res = lockm.cluster_lock_acquire(self.ctx, 'CLUSTER_A', 'ACTION_XYZ') - - self.assertTrue(res) - mock_acquire.assert_called_once_with('CLUSTER_A', 'ACTION_XYZ', - lockm.CLUSTER_SCOPE) - - @mock.patch.object(common_utils, 'is_service_dead') - @mock.patch.object(svco.Service, 'gc_by_engine') - @mock.patch.object(clo.ClusterLock, "acquire") - @mock.patch.object(clo.ClusterLock, "steal") - def test_cluster_lock_acquire_dead_owner(self, mock_steal, mock_acquire, - mock_gc, mock_dead): - mock_dead.return_value = True - mock_acquire.return_value = ['ACTION_ABC'] - mock_steal.return_value = ['ACTION_XYZ'] - - res = lockm.cluster_lock_acquire(self.ctx, 'CLUSTER_A', 'ACTION_XYZ', - 'NEW_ENGINE') - - self.assertTrue(res) - mock_acquire.assert_called_with("CLUSTER_A", "ACTION_XYZ", - lockm.CLUSTER_SCOPE) - self.assertEqual(3, mock_acquire.call_count) - mock_steal.assert_called_once_with('CLUSTER_A', 'ACTION_XYZ') - mock_gc.assert_called_once_with(mock.ANY) - - @mock.patch.object(common_utils, 'is_service_dead') - @mock.patch.object(clo.ClusterLock, "acquire") - def test_cluster_lock_acquire_failed(self, mock_acquire, mock_dead): - mock_dead.return_value = False - mock_acquire.return_value = ['ACTION_ABC'] - - res = lockm.cluster_lock_acquire(self.ctx, 'CLUSTER_A', 'ACTION_XYZ') - - self.assertFalse(res) - mock_acquire.assert_called_with('CLUSTER_A', 'ACTION_XYZ', - lockm.CLUSTER_SCOPE) - self.assertEqual(3, mock_acquire.call_count) - - @mock.patch.object(clo.ClusterLock, "acquire") - @mock.patch.object(clo.ClusterLock, "steal") - def test_cluster_lock_acquire_forced(self, mock_steal, mock_acquire): - mock_acquire.return_value = ['ACTION_ABC'] - mock_steal.return_value = ['ACTION_XY'] - - res = lockm.cluster_lock_acquire(self.ctx, 'CLUSTER_A', - 'ACTION_XY', forced=True) - - self.assertTrue(res) - mock_acquire.assert_called_with('CLUSTER_A', 'ACTION_XY', - lockm.CLUSTER_SCOPE) - self.assertEqual(3, mock_acquire.call_count) - mock_steal.assert_called_once_with('CLUSTER_A', 'ACTION_XY') - - @mock.patch.object(common_utils, 'is_service_dead') - @mock.patch.object(clo.ClusterLock, "acquire") - @mock.patch.object(clo.ClusterLock, "steal") - def test_cluster_lock_acquire_steal_failed(self, mock_steal, mock_acquire, - mock_dead): - mock_dead.return_value = False - mock_acquire.return_value = ['ACTION_ABC'] - mock_steal.return_value = [] - - res = lockm.cluster_lock_acquire(self.ctx, 'CLUSTER_A', - 'ACTION_XY', forced=True) - - self.assertFalse(res) - mock_acquire.assert_called_with('CLUSTER_A', 'ACTION_XY', - lockm.CLUSTER_SCOPE) - self.assertEqual(3, mock_acquire.call_count) - mock_steal.assert_called_once_with('CLUSTER_A', 'ACTION_XY') - - @mock.patch.object(clo.ClusterLock, "release") - def 
test_cluster_lock_release(self, mock_release): - actual = lockm.cluster_lock_release('C', 'A', 'S') - - self.assertEqual(mock_release.return_value, actual) - mock_release.assert_called_once_with('C', 'A', 'S') - - @mock.patch.object(nlo.NodeLock, "acquire") - def test_node_lock_acquire_already_owner(self, mock_acquire): - mock_acquire.return_value = 'ACTION_XYZ' - - res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XYZ') - - self.assertTrue(res) - mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XYZ') - - @mock.patch.object(common_utils, 'is_service_dead') - @mock.patch.object(ao.Action, 'mark_failed') - @mock.patch.object(nlo.NodeLock, "acquire") - @mock.patch.object(nlo.NodeLock, "steal") - def test_node_lock_acquire_dead_owner(self, mock_steal, mock_acquire, - mock_action_fail, mock_dead): - mock_dead.return_value = True - mock_acquire.side_effect = ['ACTION_ABC'] - mock_steal.return_value = 'ACTION_XYZ' - - res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XYZ', - 'NEW_ENGINE') - - self.assertTrue(res) - mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XYZ') - mock_steal.assert_called_once_with('NODE_A', 'ACTION_XYZ') - mock_action_fail.assert_called_once_with( - self.ctx, 'ACTION_ABC', mock.ANY, - 'Engine died when executing this action.') - - @mock.patch.object(common_utils, 'is_service_dead') - @mock.patch.object(nlo.NodeLock, "acquire") - def test_node_lock_acquire_failed(self, mock_acquire, mock_dead): - mock_dead.return_value = False - mock_acquire.side_effect = ['ACTION_ABC'] - - res = lockm.node_lock_acquire(self.ctx, 'NODE_A', 'ACTION_XYZ') - - self.assertFalse(res) - mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XYZ') - - @mock.patch.object(nlo.NodeLock, "acquire") - @mock.patch.object(nlo.NodeLock, "steal") - def test_node_lock_acquire_forced(self, mock_steal, mock_acquire): - mock_acquire.side_effect = ['ACTION_ABC', 'ACTION_ABC', 'ACTION_ABC'] - mock_steal.return_value = 'ACTION_XY' - - res = lockm.node_lock_acquire(self.ctx, 'NODE_A', - 'ACTION_XY', forced=True) - - self.assertTrue(res) - mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XY') - mock_steal.assert_called_once_with('NODE_A', 'ACTION_XY') - - @mock.patch.object(ao.Action, 'get') - @mock.patch.object(nlo.NodeLock, "acquire") - @mock.patch.object(nlo.NodeLock, "steal") - def test_node_lock_acquire_steal_failed(self, mock_steal, mock_acquire, - mock_get): - mock_acquire.side_effect = ['ACTION_ABC'] - mock_steal.return_value = None - - res = lockm.node_lock_acquire(self.ctx, 'NODE_A', - 'ACTION_XY', forced=True) - - self.assertFalse(res) - mock_acquire.assert_called_once_with('NODE_A', 'ACTION_XY') - mock_steal.assert_called_once_with('NODE_A', 'ACTION_XY') - - @mock.patch.object(nlo.NodeLock, "release") - def test_node_lock_release(self, mock_release): - actual = lockm.node_lock_release('C', 'A') - self.assertEqual(mock_release.return_value, actual) - mock_release.assert_called_once_with('C', 'A') diff --git a/senlin/tests/unit/engine/test_service.py b/senlin/tests/unit/engine/test_service.py deleted file mode 100644 index 8336bab2c..000000000 --- a/senlin/tests/unit/engine/test_service.py +++ /dev/null @@ -1,396 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -from unittest import mock - -from oslo_config import cfg -from oslo_context import context as oslo_context -import oslo_messaging -from oslo_service import threadgroup -from oslo_utils import uuidutils -from osprofiler import profiler - -from senlin.common import consts -from senlin.common import messaging -from senlin.db import api as db_api -from senlin.engine.actions import base as actionm -from senlin.engine import dispatcher -from senlin.engine import service -from senlin.objects import service as service_obj -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class DummyThread(object): - def __init__(self, function, *args, **kwargs): - self.function = function - - -class DummyThreadGroup(object): - def __init__(self): - self.threads = [] - - def add_timer(self, interval, callback, initial_delay=None, - *args, **kwargs): - self.threads.append(callback) - - def stop_timers(self): - pass - - def add_thread(self, callback, cnxt, trace, func, *args, **kwargs): - # callback here is _start_with_trace, func is the 'real' callback - self.threads.append(func) - return DummyThread(func) - - def stop(self, graceful=False): - pass - - def wait(self): - pass - - -class TestEngine(base.SenlinTestCase): - def setUp(self): - super(TestEngine, self).setUp() - self.context = utils.dummy_context() - - self.service_id = '4db0a14c-dc10-4131-8ed6-7573987ce9b0' - self.tg = mock.Mock() - self.topic = consts.ENGINE_TOPIC - - self.tg = mock.Mock() - self.svc = service.EngineService('HOST', self.topic) - self.svc.service_id = self.service_id - self.svc.tg = self.tg - - @mock.patch('oslo_service.service.Service.__init__') - def test_service_thread_numbers(self, mock_service_init): - service.EngineService('HOST', self.topic) - - mock_service_init.assert_called_once_with(1000) - - @mock.patch('oslo_service.service.Service.__init__') - def test_service_thread_numbers_override(self, mock_service_init): - cfg.CONF.set_override('threads', 100, group='engine') - - service.EngineService('HOST', self.topic) - - mock_service_init.assert_called_once_with(100) - - @mock.patch('oslo_service.service.Service.__init__') - def test_service_thread_numbers_override_legacy(self, mock_service_init): - cfg.CONF.set_override('scheduler_thread_pool_size', 101) - - service.EngineService('HOST', self.topic) - - mock_service_init.assert_called_once_with(101) - - def test_init(self): - self.assertEqual(self.service_id, self.svc.service_id) - self.assertEqual(self.tg, self.svc.tg) - self.assertEqual(self.topic, self.svc.topic) - - @mock.patch.object(uuidutils, 'generate_uuid') - @mock.patch.object(oslo_messaging, 'get_rpc_server') - @mock.patch.object(service_obj.Service, 'create') - def test_service_start(self, mock_service_create, mock_rpc_server, - mock_uuid): - service_uuid = '4db0a14c-dc10-4131-8ed6-7573987ce9b1' - mock_uuid.return_value = service_uuid - - self.svc.start() - - mock_uuid.assert_called_once() - mock_service_create.assert_called_once() - self.svc.server.start.assert_called_once() - - self.assertEqual(service_uuid, self.svc.service_id) - - 
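[Editorial sketch] The SenlinLockTest cases above describe a bounded acquire-retry-steal protocol: the cluster-lock variant attempts acquisition three times, and a held lock is stolen either when its owner's engine is dead (the tests also garbage-collect that engine's state, omitted here) or when the caller forces the takeover. The sketch below compresses that into plain callables, with owners simplified to scalar action ids rather than the owner lists the cluster lock returns:

    import time

    CLUSTER_LOCK_RETRIES = 3   # the cluster-lock tests above observe
                               # exactly three acquire attempts

    def lock_acquire(target_id, action_id, try_acquire, steal,
                     owner_is_dead, forced=False):
        # try_acquire returns the current lock owner; holding the lock
        # means the owner equals our own action id.
        owner = None
        for _ in range(CLUSTER_LOCK_RETRIES):
            owner = try_acquire(target_id, action_id)
            if owner == action_id:
                return True
            time.sleep(0.1)

        # The lock is stuck: steal it when its owner's engine has died,
        # or when the caller explicitly forces the takeover.
        if forced or owner_is_dead(owner):
            return steal(target_id, action_id) == action_id
        return False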
@mock.patch.object(service_obj.Service, 'delete') - def test_service_stop(self, mock_delete): - self.svc.server = mock.Mock() - - self.svc.stop() - - self.svc.server.stop.assert_called_once() - self.svc.server.wait.assert_called_once() - - mock_delete.assert_called_once_with(self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'delete') - def test_service_stop_not_yet_started(self, mock_delete): - self.svc.server = None - - self.svc.stop() - - mock_delete.assert_called_once_with(self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'update') - def test_service_manage_report_update(self, mock_update): - mock_update.return_value = mock.Mock() - self.svc.service_manage_report() - mock_update.assert_called_once_with(mock.ANY, - self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'update') - def test_service_manage_report_with_exception(self, mock_update): - mock_update.side_effect = Exception('blah') - self.svc.service_manage_report() - self.assertEqual(mock_update.call_count, 1) - - def test_listening(self): - self.assertTrue(self.svc.listening(self.context)) - - @mock.patch.object(oslo_context, 'get_current') - @mock.patch.object(messaging, 'get_rpc_client') - def test_notify_broadcast(self, mock_rpc, mock_get_current): - cfg.CONF.set_override('host', 'HOSTNAME') - fake_ctx = mock.Mock() - mock_get_current.return_value = fake_ctx - mock_rpc.return_value = mock.Mock() - - dispatcher.notify('METHOD') - - mock_rpc.assert_called_once_with(consts.ENGINE_TOPIC, 'HOSTNAME') - mock_client = mock_rpc.return_value - mock_client.prepare.assert_called_once_with(fanout=True) - - mock_context = mock_client.prepare.return_value - mock_context.cast.assert_called_once_with(fake_ctx, 'METHOD') - - @mock.patch.object(oslo_context, 'get_current') - @mock.patch.object(messaging, 'get_rpc_client') - def test_notify_single_server(self, mock_rpc, mock_get_current): - cfg.CONF.set_override('host', 'HOSTNAME') - fake_ctx = mock.Mock() - mock_get_current.return_value = fake_ctx - mock_rpc.return_value = mock.Mock() - - result = dispatcher.notify('METHOD', 'FAKE_ENGINE') - - self.assertTrue(result) - mock_rpc.assert_called_once_with(consts.ENGINE_TOPIC, 'HOSTNAME') - mock_client = mock_rpc.return_value - mock_client.prepare.assert_called_once_with(server='FAKE_ENGINE') - - mock_context = mock_client.prepare.return_value - mock_context.cast.assert_called_once_with(fake_ctx, 'METHOD') - - @mock.patch.object(messaging, 'get_rpc_client') - def test_notify_timeout(self, mock_rpc): - cfg.CONF.set_override('host', 'HOSTNAME') - mock_rpc.return_value = mock.Mock() - mock_client = mock_rpc.return_value - mock_context = mock_client.prepare.return_value - mock_context.cast.side_effect = oslo_messaging.MessagingTimeout - - result = dispatcher.notify('METHOD') - - self.assertFalse(result) - mock_rpc.assert_called_once_with(consts.ENGINE_TOPIC, 'HOSTNAME') - mock_client.prepare.assert_called_once_with(fanout=True) - - mock_context.cast.assert_called_once_with(mock.ANY, 'METHOD') - - @mock.patch.object(profiler, 'get') - def test_serialize_profile_info(self, mock_profiler_get): - mock_profiler_get.return_value = None - - self.assertIsNone(self.svc._serialize_profile_info()) - - @mock.patch.object(profiler, 'get') - def test_serialize_profile_info_with_profile(self, mock_profiler_get): - mock_result = mock.Mock() - mock_result.hmac_key = 'hmac_key' - mock_result.get_base_id.return_value = 'get_base_id' - mock_result.get_id.return_value = 'get_id' - - mock_profiler_get.return_value = mock_result - 
result = self.svc._serialize_profile_info() - - self.assertEqual( - { - 'base_id': 'get_base_id', - 'hmac_key': 'hmac_key', - 'parent_id': 'get_id' - }, - result - ) - - @mock.patch.object(profiler, 'init') - def test_start_with_trace(self, mock_profiler_init): - self.assertIsNotNone( - self.svc._start_with_trace( - self.context, {'hmac_key': mock.Mock()}, mock.Mock() - ) - ) - - -class DispatcherActionTest(base.SenlinTestCase): - def setUp(self): - super(DispatcherActionTest, self).setUp() - self.context = utils.dummy_context() - self.fake_tg = DummyThreadGroup() - - self.mock_tg = self.patchobject(threadgroup, 'ThreadGroup') - self.mock_tg.return_value = self.fake_tg - - @mock.patch.object(db_api, 'action_acquire_first_ready') - @mock.patch.object(db_api, 'action_acquire') - def test_start_action(self, mock_action_acquire, - mock_action_acquire_1st): - action = mock.Mock() - action.id = '0123' - mock_action_acquire.return_value = action - mock_action_acquire_1st.return_value = None - - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - svc.start_action('4567', '0123') - - self.mock_tg.add_thread.assert_called_once_with( - svc._start_with_trace, - oslo_context.get_current(), - None, actionm.ActionProc, - svc.db_session, '0123' - ) - - @mock.patch.object(db_api, 'action_acquire_first_ready') - def test_start_action_no_action_id(self, mock_acquire_action): - mock_action = mock.Mock() - mock_action.id = '0123' - mock_action.action = 'CLUSTER_CREATE' - mock_acquire_action.side_effect = [mock_action, None] - - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - svc.start_action('4567') - - self.mock_tg.add_thread.assert_called_once_with( - svc._start_with_trace, - oslo_context.get_current(), - None, actionm.ActionProc, - svc.db_session, '0123' - ) - - @mock.patch.object(service, 'sleep') - @mock.patch.object(db_api, 'action_acquire_first_ready') - def test_start_action_batch_control(self, mock_acquire_action, mock_sleep): - mock_action1 = mock.Mock() - mock_action1.id = 'ID1' - mock_action1.action = 'NODE_CREATE' - mock_action2 = mock.Mock() - mock_action2.id = 'ID2' - mock_action2.action = 'CLUSTER_CREATE' - mock_action3 = mock.Mock() - mock_action3.id = 'ID3' - mock_action3.action = 'NODE_DELETE' - mock_acquire_action.side_effect = [mock_action1, mock_action2, - mock_action3, None] - cfg.CONF.set_override('max_actions_per_batch', 1) - cfg.CONF.set_override('batch_interval', 2) - - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - svc.start_action('4567') - - mock_sleep.assert_called_once_with(2) - self.assertEqual(self.mock_tg.add_thread.call_count, 3) - - @mock.patch.object(service, 'sleep') - @mock.patch.object(db_api, 'action_acquire_first_ready') - def test_start_action_multiple_batches(self, mock_acquire_action, - mock_sleep): - action_types = ['NODE_CREATE', 'NODE_DELETE'] - actions = [] - for index in range(10): - mock_action = mock.Mock() - mock_action.id = 'ID%d' % (index + 1) - mock_action.action = action_types[index % 2] - actions.append(mock_action) - - # Add a None at the end to end the process. 
- actions.insert(len(actions), None) - - mock_acquire_action.side_effect = actions - cfg.CONF.set_override('max_actions_per_batch', 3) - cfg.CONF.set_override('batch_interval', 5) - - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - svc.start_action(self.context) - - self.assertEqual(mock_sleep.call_count, 3) - self.assertEqual(self.mock_tg.add_thread.call_count, 10) - - @mock.patch.object(db_api, 'action_acquire_first_ready') - @mock.patch.object(db_api, 'action_acquire') - def test_start_action_failed_locking_action(self, mock_acquire_action, - mock_acquire_action_1st): - mock_acquire_action.return_value = None - mock_acquire_action_1st.return_value = None - - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - res = svc.start_action(self.context, '0123') - self.assertIsNone(res) - - @mock.patch.object(db_api, 'action_acquire_first_ready') - def test_start_action_no_action_ready(self, mock_acquire_action): - mock_acquire_action.return_value = None - - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - res = svc.start_action('4567') - self.assertIsNone(res) - - def test_cancel_action(self): - mock_action = mock.Mock() - mock_load = self.patchobject(actionm.Action, 'load', - return_value=mock_action) - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - svc.cancel_action(self.context, 'action0123') - - mock_load.assert_called_once_with(svc.db_session, 'action0123', - project_safe=False) - mock_action.signal.assert_called_once_with(mock_action.SIG_CANCEL) - - def test_suspend_action(self): - mock_action = mock.Mock() - mock_load = self.patchobject(actionm.Action, 'load', - return_value=mock_action) - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - svc.suspend_action(self.context, 'action0123') - - mock_load.assert_called_once_with(svc.db_session, 'action0123', - project_safe=False) - mock_action.signal.assert_called_once_with(mock_action.SIG_SUSPEND) - - def test_resume_action(self): - mock_action = mock.Mock() - mock_load = self.patchobject(actionm.Action, 'load', - return_value=mock_action) - svc = service.EngineService('HOST', 'TOPIC') - svc.tg = self.mock_tg - svc.resume_action(self.context, 'action0123') - - mock_load.assert_called_once_with(svc.db_session, 'action0123', - project_safe=False) - mock_action.signal.assert_called_once_with(mock_action.SIG_RESUME) - - def test_sleep(self): - mock_sleep = self.patchobject(eventlet, 'sleep') - service.sleep(1) - mock_sleep.assert_called_once_with(1) diff --git a/senlin/tests/unit/events/__init__.py b/senlin/tests/unit/events/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/events/test_base.py b/senlin/tests/unit/events/test_base.py deleted file mode 100644 index 0b1aa2dfb..000000000 --- a/senlin/tests/unit/events/test_base.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
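[Editorial sketch] The dispatcher tests above imply a batch-control loop in start_action: ready actions are drained one at a time; actions that count toward the batch fill it up to max_actions_per_batch, after which the dispatcher sleeps batch_interval before starting the next counted action. The assertions (one sleep for NODE/CLUSTER/NODE with a batch size of one; three sleeps for ten node actions with a batch size of three) suggest cluster-level actions bypass the batch. That inferred behavior, with every helper a stand-in:

    def drain_ready_actions(acquire_next, start_thread, sleep,
                            max_batch, interval, counts_in_batch):
        # Dispatch every ready action; actions that count toward the
        # batch trigger a sleep of `interval` once `max_batch` of them
        # have been started.
        batch = 0
        while True:
            action = acquire_next()
            if action is None:
                break          # nothing ready; stop dispatching
            if max_batch and counts_in_batch(action):
                if batch >= max_batch:
                    sleep(interval)
                    batch = 0
                batch += 1
            start_thread(action)

Replaying the two tests against this loop reproduces their counts: with max_batch=1 and interval=2, the sequence NODE_CREATE, CLUSTER_CREATE, NODE_DELETE yields one sleep and three threads; with max_batch=3, ten node actions yield three sleeps and ten threads.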
- -import testtools -from unittest import mock - -from senlin.common import consts -from senlin.events import base -from senlin.tests.unit.common import utils - -CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de' - - -class TestEventBackend(testtools.TestCase): - - def setUp(self): - super(TestEventBackend, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch('oslo_utils.reflection.get_class_name') - def test_check_entity_cluster(self, mock_get): - entity = mock.Mock() - mock_get.return_value = 'Cluster' - - res = base.EventBackend._check_entity(entity) - - self.assertEqual('CLUSTER', res) - mock_get.assert_called_once_with(entity, fully_qualified=False) - - @mock.patch('oslo_utils.reflection.get_class_name') - def test_check_entity_node(self, mock_get): - entity = mock.Mock() - mock_get.return_value = 'Node' - - res = base.EventBackend._check_entity(entity) - - self.assertEqual('NODE', res) - mock_get.assert_called_once_with(entity, fully_qualified=False) - - def test_get_action_name_unexpected(self): - action = mock.Mock(action="UNEXPECTED") - res = base.EventBackend._get_action_name(action) - self.assertEqual('unexpected', res) - - def test_get_action_name_correct_format(self): - action = mock.Mock(action="FOO_BAR") - res = base.EventBackend._get_action_name(action) - self.assertEqual('bar', res) - - def test_get_action_name_operation_found(self): - action = mock.Mock(action=consts.NODE_OPERATION, - inputs={'operation': 'bar'}) - res = base.EventBackend._get_action_name(action) - self.assertEqual('bar', res) - - def test_get_action_name_operation_not_found(self): - action = mock.Mock(action="FOO_OPERATION", inputs={}) - res = base.EventBackend._get_action_name(action) - self.assertEqual('operation', res) - - def test_dump(self): - self.assertRaises(NotImplementedError, - base.EventBackend.dump, - '1', '2') diff --git a/senlin/tests/unit/events/test_database.py b/senlin/tests/unit/events/test_database.py deleted file mode 100644 index 8b4adbdbe..000000000 --- a/senlin/tests/unit/events/test_database.py +++ /dev/null @@ -1,122 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
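[Editorial sketch] The TestEventBackend cases above fix the action-name derivation: "FOO_BAR" maps to "bar", a name with no underscore is simply lowercased, and *_OPERATION actions take the real operation from the action's inputs, falling back to "operation". A sketch consistent with those four assertions (the real _get_action_name may be structured differently):

    def get_action_name(action_type, inputs=None):
        parts = action_type.split('_', 1)
        if len(parts) == 1:
            return action_type.lower()   # "UNEXPECTED" -> "unexpected"
        name = parts[1].lower()          # "FOO_BAR" -> "bar"
        if name == 'operation':
            # NODE_OPERATION etc. carry the operation in the inputs.
            return (inputs or {}).get('operation', 'operation')
        return name

    assert get_action_name('UNEXPECTED') == 'unexpected'
    assert get_action_name('FOO_BAR') == 'bar'
    assert get_action_name('NODE_OPERATION', {'operation': 'bar'}) == 'bar'
    assert get_action_name('FOO_OPERATION', {}) == 'operation'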
-
-import testtools
-from unittest import mock
-
-from senlin.common import consts
-from senlin.events import base
-from senlin.events import database as DB
-from senlin.objects import event as eo
-from senlin.tests.unit.common import utils
-
-CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de'
-
-
-class TestDatabase(testtools.TestCase):
-
-    def setUp(self):
-        super(TestDatabase, self).setUp()
-        self.context = utils.dummy_context()
-
-    @mock.patch.object(base.EventBackend, '_check_entity')
-    @mock.patch.object(eo.Event, 'create')
-    def test_dump(self, mock_create, mock_check):
-        mock_check.return_value = 'CLUSTER'
-        entity = mock.Mock(id='CLUSTER_ID')
-        entity.name = 'cluster1'
-        action = mock.Mock(context=self.context, action='ACTION',
-                           entity=entity)
-
-        res = DB.DBEvent.dump('LEVEL', action, phase='STATUS', reason='REASON')
-
-        self.assertIsNone(res)
-        mock_check.assert_called_once_with(entity)
-        mock_create.assert_called_once_with(
-            self.context,
-            {
-                'level': 'LEVEL',
-                'timestamp': mock.ANY,
-                'oid': 'CLUSTER_ID',
-                'otype': 'CLUSTER',
-                'oname': 'cluster1',
-                'cluster_id': 'CLUSTER_ID',
-                'user': self.context.user_id,
-                'project': self.context.project_id,
-                'action': 'ACTION',
-                'status': 'STATUS',
-                'status_reason': 'REASON',
-                'meta_data': {}
-            })
-
-    @mock.patch.object(base.EventBackend, '_check_entity')
-    @mock.patch.object(eo.Event, 'create')
-    def test_dump_with_extra_but_no_status(self, mock_create, mock_check):
-        mock_check.return_value = 'NODE'
-        entity = mock.Mock(id='NODE_ID', status='S1', status_reason='R1',
-                           cluster_id='CLUSTER_ID')
-        entity.name = 'node1'
-
-        action = mock.Mock(context=self.context, entity=entity,
-                           action='ACTION')
-
-        res = DB.DBEvent.dump('LEVEL', action, timestamp='NOW',
-                              extra={'foo': 'bar'})
-
-        self.assertIsNone(res)
-        mock_check.assert_called_once_with(entity)
-        mock_create.assert_called_once_with(
-            self.context,
-            {
-                'level': 'LEVEL',
-                'timestamp': 'NOW',
-                'oid': 'NODE_ID',
-                'otype': 'NODE',
-                'oname': 'node1',
-                'cluster_id': 'CLUSTER_ID',
-                'user': self.context.user_id,
-                'project': self.context.project_id,
-                'action': 'ACTION',
-                'status': 'S1',
-                'status_reason': 'R1',
-                'meta_data': {'foo': 'bar'}
-            })
-
-    @mock.patch.object(base.EventBackend, '_check_entity')
-    @mock.patch.object(eo.Event, 'create')
-    def test_dump_operation_action(self, mock_create, mock_check):
-        mock_check.return_value = 'CLUSTER'
-        entity = mock.Mock(id='CLUSTER_ID')
-        entity.name = 'cluster1'
-        action = mock.Mock(context=self.context, action=consts.NODE_OPERATION,
-                           entity=entity, inputs={'operation': 'dance'})
-
-        res = DB.DBEvent.dump('LEVEL', action, phase='STATUS', reason='REASON')
-
-        self.assertIsNone(res)
-        mock_check.assert_called_once_with(entity)
-        mock_create.assert_called_once_with(
-            self.context,
-            {
-                'level': 'LEVEL',
-                'timestamp': mock.ANY,
-                'oid': 'CLUSTER_ID',
-                'otype': 'CLUSTER',
-                'oname': 'cluster1',
-                'cluster_id': 'CLUSTER_ID',
-                'user': self.context.user_id,
-                'project': self.context.project_id,
-                'action': 'dance',
-                'status': 'STATUS',
-                'status_reason': 'REASON',
-                'meta_data': {}
-            })
diff --git a/senlin/tests/unit/events/test_message.py b/senlin/tests/unit/events/test_message.py
deleted file mode 100644
index 73bfc34af..000000000
--- a/senlin/tests/unit/events/test_message.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils -from oslo_utils import uuidutils -import testtools - -from senlin.engine.actions import base as action_base -from senlin.engine import cluster -from senlin.engine import node -from senlin.events import base -from senlin.events import message as MSG -from senlin.objects import notification as nobj -from senlin.tests.unit.common import utils - -CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de' - - -class TestMessageEvent(testtools.TestCase): - - def setUp(self): - super(TestMessageEvent, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(nobj.NotificationBase, '_emit') - def test_notify_cluster_action(self, mock_emit): - cluster_id = uuidutils.generate_uuid() - profile_id = uuidutils.generate_uuid() - cluster_init = timeutils.utcnow(True) - action_id = uuidutils.generate_uuid() - cluster_params = { - 'id': cluster_id, - 'init_at': cluster_init, - 'min_size': 1, - 'max_size': 10, - 'timeout': 4, - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - c1 = cluster.Cluster('fake_name', 5, profile_id, **cluster_params) - action_params = { - 'id': action_id, - 'name': 'fake_name', - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - action = action_base.Action(cluster_id, 'CLUSTER_CREATE', self.ctx, - **action_params) - publisher_id = 'senlin-engine:%s' % cfg.CONF.host - expected_payload = { - 'senlin_object.data': { - 'action': { - 'senlin_object.data': { - 'action': 'CLUSTER_CREATE', - 'created_at': None, - 'data': '{}', - 'end_time': None, - 'id': action_id, - 'inputs': '{}', - 'name': 'fake_name', - 'outputs': '{}', - 'project': self.ctx.project_id, - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - 'target': cluster_id, - 'timeout': 3600, - 'user': self.ctx.user_id, - }, - 'senlin_object.name': 'ActionPayload', - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': '1.0' - }, - 'cluster': { - 'senlin_object.data': { - 'created_at': None, - 'data': '{}', - 'dependents': '{}', - 'desired_capacity': 5, - 'domain': '', - 'id': cluster_id, - 'init_at': mock.ANY, - 'max_size': 10, - 'metadata': '{}', - 'min_size': 1, - 'name': 'fake_name', - 'profile_id': profile_id, - 'project': u'project1', - 'status': u'ACTIVE', - 'status_reason': u'Good', - 'timeout': 4, - 'updated_at': None, - 'user': u'user1' - }, - 'senlin_object.name': 'ClusterPayload', - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': '1.0' - }, - 'exception': None - }, - 'senlin_object.name': 'ClusterActionPayload', - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': '1.0' - } - - res = MSG.MessageEvent._notify_cluster_action( - self.ctx, logging.INFO, c1, action, phase='start') - - self.assertIsNone(res) - - mock_emit.assert_called_once_with( - self.ctx, 'cluster.create.start', publisher_id, mock.ANY) - payload = mock_emit.call_args[0][3] - self.assertEqual(expected_payload, payload) - - 
@mock.patch.object(nobj.NotificationBase, '_emit') - def test_notify_node_action(self, mock_emit): - node_id = uuidutils.generate_uuid() - profile_id = uuidutils.generate_uuid() - node_init = timeutils.utcnow(True) - action_id = uuidutils.generate_uuid() - node_params = { - 'id': node_id, - 'cluster_id': '', - 'index': -1, - 'init_at': node_init, - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - n1 = node.Node('fake_name', profile_id, **node_params) - action_params = { - 'id': action_id, - 'name': 'fake_name', - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - } - action = action_base.Action(node_id, 'NODE_CREATE', self.ctx, - **action_params) - publisher_id = 'senlin-engine:%s' % cfg.CONF.host - expected_payload = { - 'senlin_object.data': { - 'action': { - 'senlin_object.data': { - 'action': 'NODE_CREATE', - 'created_at': None, - 'data': '{}', - 'end_time': None, - 'id': action_id, - 'inputs': '{}', - 'name': 'fake_name', - 'outputs': '{}', - 'project': self.ctx.project_id, - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - 'target': node_id, - 'timeout': 3600, - 'user': self.ctx.user_id, - }, - 'senlin_object.name': 'ActionPayload', - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': '1.0' - }, - 'node': { - 'senlin_object.data': { - 'cluster_id': '', - 'created_at': None, - 'data': '{}', - 'dependents': '{}', - 'domain': '', - 'id': node_id, - 'index': -1, - 'init_at': mock.ANY, - 'metadata': '{}', - 'name': 'fake_name', - 'physical_id': None, - 'profile_id': profile_id, - 'project': 'project1', - 'role': '', - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'updated_at': None, - 'user': 'user1', - }, - 'senlin_object.name': 'NodePayload', - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': '1.0' - }, - 'exception': None - }, - 'senlin_object.name': 'NodeActionPayload', - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': '1.0' - } - - res = MSG.MessageEvent._notify_node_action( - self.ctx, logging.INFO, n1, action, phase='start') - - self.assertIsNone(res) - - mock_emit.assert_called_once_with( - self.ctx, 'node.create.start', publisher_id, mock.ANY) - payload = mock_emit.call_args[0][3] - self.assertEqual(expected_payload, payload) - - @mock.patch.object(MSG.MessageEvent, '_notify_cluster_action') - @mock.patch.object(base.EventBackend, '_check_entity') - def test_dump_cluster_action_event(self, mock_check, mock_notify): - mock_check.return_value = 'CLUSTER' - entity = mock.Mock() - action = mock.Mock(context=self.ctx, entity=entity) - - res = MSG.MessageEvent.dump(logging.INFO, action) - - self.assertIsNone(res) - mock_check.assert_called_once_with(entity) - mock_notify.assert_called_once_with(self.ctx, logging.INFO, entity, - action) - - @mock.patch.object(MSG.MessageEvent, '_notify_cluster_action') - @mock.patch.object(base.EventBackend, '_check_entity') - def test_dump_cluster_action_event_warn(self, mock_check, mock_notify): - mock_check.return_value = 'CLUSTER' - entity = mock.Mock() - action = mock.Mock(context=self.ctx, entity=entity) - - res = MSG.MessageEvent.dump(logging.WARNING, action) - - self.assertIsNone(res) - mock_check.assert_called_once_with(entity) - mock_notify.assert_called_once_with(self.ctx, logging.WARNING, - entity, action) - - @mock.patch.object(MSG.MessageEvent, '_notify_node_action') - @mock.patch.object(base.EventBackend, '_check_entity') - def test_dump_node_action_event(self, mock_check, mock_notify): - 
mock_check.return_value = 'NODE' - entity = mock.Mock() - action = mock.Mock(context=self.ctx, entity=entity) - - res = MSG.MessageEvent.dump(logging.INFO, action) - - self.assertIsNone(res) - mock_check.assert_called_once_with(entity) - mock_notify.assert_called_once_with(self.ctx, logging.INFO, entity, - action) diff --git a/senlin/tests/unit/fakes.py b/senlin/tests/unit/fakes.py deleted file mode 100644 index 4f2eed6bc..000000000 --- a/senlin/tests/unit/fakes.py +++ /dev/null @@ -1,89 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A module that contains various fake entities -""" - -from senlin.common import schema -from senlin.policies import base as policy_base -from senlin.profiles import base as profile_base - - -class TestProfile(profile_base.Profile): - - CONTEXT = 'context' - - properties_schema = { - CONTEXT: schema.Map("context property"), - 'INT': schema.Integer('int property', default=0), - 'STR': schema.String('string property', default='a string'), - 'MAP': schema.Map( - 'map property', - schema={ - 'KEY1': schema.Integer('key1'), - 'KEY2': schema.String('key2') - } - ), - 'LIST': schema.List( - 'list property', - schema=schema.String('list item'), - ), - } - - OPERATIONS = {} - - def __init__(self, name, spec, **kwargs): - super(TestProfile, self).__init__(name, spec, **kwargs) - - @classmethod - def delete(cls, ctx, profile_id): - super(TestProfile, cls).delete(ctx, profile_id) - - def do_create(self): - return {} - - def do_delete(self, id): - return True - - def do_update(self): - return {} - - def do_check(self, id): - return True - - -class TestPolicy(policy_base.Policy): - VERSION = 1.0 - properties_schema = { - 'KEY1': schema.String('key1', default='default1'), - 'KEY2': schema.Integer('key2', required=True), - } - - TARGET = [ - ('BEFORE', 'CLUSTER_ADD_NODES') - ] - - def __init__(self, name, spec, **kwargs): - super(TestPolicy, self).__init__(name, spec, **kwargs) - - def attach(self, cluster, enabled=True): - return True, {} - - def detach(self, cluster): - return True, 'OK' - - def pre_op(self, cluster_id, action): - return - - def post_op(self, cluster_id, action): - return diff --git a/senlin/tests/unit/health_manager/__init__.py b/senlin/tests/unit/health_manager/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/health_manager/test_service.py b/senlin/tests/unit/health_manager/test_service.py deleted file mode 100644 index 912a9e482..000000000 --- a/senlin/tests/unit/health_manager/test_service.py +++ /dev/null @@ -1,195 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg -import oslo_messaging -from oslo_utils import uuidutils - -from senlin.common import consts -from senlin.common import context -from senlin.engine import health_manager as hm -from senlin.health_manager import service -from senlin.objects import health_registry as hr -from senlin.objects import service as obj_service -from senlin.objects import service as service_obj -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestHealthManager(base.SenlinTestCase): - def setUp(self): - super(TestHealthManager, self).setUp() - self.context = utils.dummy_context() - - self.service_id = '4db0a14c-dc10-4131-8ed6-7573987ce9b0' - self.tg = mock.Mock() - self.topic = consts.HEALTH_MANAGER_TOPIC - - self.svc = service.HealthManagerService('HOST', self.topic) - self.svc.service_id = self.service_id - self.svc.tg = self.tg - - @mock.patch('oslo_service.service.Service.__init__') - def test_service_thread_numbers(self, mock_service_init): - service.HealthManagerService('HOST', self.topic) - - mock_service_init.assert_called_once_with(1000) - - @mock.patch('oslo_service.service.Service.__init__') - def test_service_thread_numbers_override(self, mock_service_init): - cfg.CONF.set_override('threads', 100, group='health_manager') - - service.HealthManagerService('HOST', self.topic) - - mock_service_init.assert_called_once_with(100) - - @mock.patch('oslo_service.service.Service.__init__') - def test_service_thread_numbers_override_legacy(self, mock_service_init): - cfg.CONF.set_override('health_manager_thread_pool_size', 101) - - service.HealthManagerService('HOST', self.topic) - - mock_service_init.assert_called_once_with(101) - - def test_init(self): - self.assertEqual(self.service_id, self.svc.service_id) - self.assertEqual(self.tg, self.svc.tg) - self.assertEqual(self.topic, self.svc.topic) - - self.assertEqual(consts.RPC_API_VERSION, self.svc.version) - - @mock.patch.object(uuidutils, 'generate_uuid') - @mock.patch.object(oslo_messaging, 'get_rpc_server') - @mock.patch.object(obj_service.Service, 'create') - def test_service_start(self, mock_service_create, mock_rpc_server, - mock_uuid): - service_uuid = '4db0a14c-dc10-4131-8ed6-7573987ce9b1' - mock_uuid.return_value = service_uuid - - self.svc.start() - - mock_uuid.assert_called_once() - mock_service_create.assert_called_once() - self.svc.server.start.assert_called_once() - - self.assertEqual(service_uuid, self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'delete') - def test_service_stop(self, mock_delete): - self.svc.server = mock.Mock() - - self.svc.stop() - - self.svc.server.stop.assert_called_once() - self.svc.server.wait.assert_called_once() - - mock_delete.assert_called_once_with(self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'delete') - def test_service_stop_not_yet_started(self, mock_delete): - self.svc.server = None - - self.svc.stop() - - mock_delete.assert_called_once_with(self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'update') - def test_service_manage_report_update(self, mock_update): - mock_update.return_value = mock.Mock() - self.svc.service_manage_report() - mock_update.assert_called_once_with(mock.ANY, - self.svc.service_id) - - @mock.patch.object(service_obj.Service, 'update') - def test_service_manage_report_with_exception(self, mock_update): - mock_update.side_effect = 
Exception('blah') - self.svc.service_manage_report() - self.assertEqual(mock_update.call_count, 1) - - def test_listening(self): - self.assertTrue(self.svc.listening(self.context)) - - def test_task(self): - self.svc.health_registry = mock.Mock() - self.svc.task() - self.svc.health_registry.load_runtime_registry.assert_called_once_with( - ) - - def test_task_with_exception(self): - self.svc.health_registry = mock.Mock() - self.svc.health_registry.load_runtime_registry.side_effect = Exception( - 'blah' - ) - self.svc.task() - self.svc.health_registry.load_runtime_registry.assert_called_once_with( - ) - - def test_enable_cluster(self): - self.svc.health_registry = mock.Mock() - self.svc.enable_cluster(self.context, 'CID') - self.svc.health_registry.enable_cluster.assert_called_once_with( - 'CID') - - def test_disable_cluster(self): - self.svc.health_registry = mock.Mock() - self.svc.disable_cluster(self.context, 'CID') - self.svc.health_registry.disable_cluster.assert_called_once_with( - 'CID') - - def test_register_cluster(self): - self.svc.health_registry = mock.Mock() - self.svc.register_cluster(self.context, 'CID', 60, 160, {}, True) - self.svc.health_registry.register_cluster.assert_called_once_with( - cluster_id='CID', - enabled=True, - interval=60, - node_update_timeout=160, - params={}) - - def test_unregister_cluster(self): - self.svc.health_registry = mock.Mock() - self.svc.unregister_cluster(self.context, 'CID') - self.svc.health_registry.unregister_cluster.assert_called_once_with( - 'CID') - - @mock.patch.object(context, 'get_admin_context') - @mock.patch.object(hr.HealthRegistry, 'get') - def test_get_manager_engine(self, mock_get, mock_ctx): - ctx = mock.Mock() - mock_ctx.return_value = ctx - - registry = mock.Mock(engine_id='fake') - mock_get.return_value = registry - - result = hm.get_manager_engine('CID') - - self.assertEqual(result, 'fake') - - mock_get.assert_called_once_with(ctx, 'CID') - self.assertTrue(mock_ctx.called) - - @mock.patch.object(context, 'get_admin_context') - @mock.patch.object(hr.HealthRegistry, 'get') - def test_get_manager_engine_none(self, mock_get, mock_ctx): - ctx = mock.Mock() - mock_ctx.return_value = ctx - - mock_get.return_value = None - - result = hm.get_manager_engine('CID') - - self.assertIsNone(result) - - mock_get.assert_called_once_with(ctx, 'CID') - self.assertTrue(mock_ctx.called) diff --git a/senlin/tests/unit/objects/__init__.py b/senlin/tests/unit/objects/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/objects/requests/__init__.py b/senlin/tests/unit/objects/requests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/objects/requests/test_actions.py b/senlin/tests/unit/objects/requests/test_actions.py deleted file mode 100644 index 5b90d463e..000000000 --- a/senlin/tests/unit/objects/requests/test_actions.py +++ /dev/null @@ -1,155 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import copy - -from senlin.objects.requests import actions -from senlin.tests.unit.common import base as test_base - - -class TestActionCreate(test_base.SenlinTestCase): - - body = { - 'name': 'test-action', - 'cluster_id': 'test-cluster', - 'action': 'CLUSTER_CREATE', - } - - def test_action_create_request_body(self): - sot = actions.ActionCreateRequestBody(**self.body) - self.assertEqual('test-action', sot.name) - self.assertEqual('test-cluster', sot.cluster_id) - self.assertEqual('CLUSTER_CREATE', sot.action) - - sot.obj_set_defaults() - - self.assertEqual({}, sot.inputs) - - def test_action_create_request_body_full(self): - body = copy.deepcopy(self.body) - body['inputs'] = {'foo': 'bar'} - sot = actions.ActionCreateRequestBody(**body) - self.assertEqual('test-action', sot.name) - self.assertEqual('test-cluster', sot.cluster_id) - self.assertEqual('CLUSTER_CREATE', sot.action) - self.assertEqual({'foo': 'bar'}, sot.inputs) - - def test_action_create_request_body_to_primitive(self): - sot = actions.ActionCreateRequestBody(**self.body) - res = sot.obj_to_primitive() - self.assertEqual( - { - 'name': u'test-action', - 'cluster_id': u'test-cluster', - 'action': u'CLUSTER_CREATE', - }, - res['senlin_object.data'] - ) - self.assertEqual('ActionCreateRequestBody', - res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - self.assertIn('name', res['senlin_object.changes']) - self.assertIn('cluster_id', res['senlin_object.changes']) - - def test_action_create_request_to_primitive(self): - body = actions.ActionCreateRequestBody(**self.body) - request = {'action': body} - sot = actions.ActionCreateRequest(**request) - self.assertIsInstance(sot.action, actions.ActionCreateRequestBody) - - self.assertEqual('test-action', sot.action.name) - self.assertEqual('test-cluster', sot.action.cluster_id) - - res = sot.obj_to_primitive() - self.assertEqual(['action'], res['senlin_object.changes']) - self.assertEqual('ActionCreateRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - data = res['senlin_object.data']['action'] - self.assertIn('cluster_id', data['senlin_object.changes']) - self.assertIn('name', data['senlin_object.changes']) - self.assertEqual('ActionCreateRequestBody', - data['senlin_object.name']) - self.assertEqual('senlin', data['senlin_object.namespace']) - self.assertEqual('1.0', data['senlin_object.version']) - - self.assertEqual( - { - 'name': u'test-action', - 'cluster_id': u'test-cluster', - 'action': u'CLUSTER_CREATE', - }, - data['senlin_object.data'] - ) - - -class TestActionList(test_base.SenlinTestCase): - def test_action_list_request_body_full(self): - params = { - 'name': ['node_create_12345678'], - 'action': ['NODE_CREATE'], - 'target': ['0df0931b-e251-4f2e-8719-4effda3627bb'], - 'status': ['READY'], - 'limit': 5, - 'marker': 'f1ed0d50-7651-4599-a8cb-c86e9c7123f6', - 'sort': 'name:asc', - 'project_safe': False, - } - sot = actions.ActionListRequest(**params) - self.assertEqual(['node_create_12345678'], sot.name) - self.assertEqual(['NODE_CREATE'], sot.action) - self.assertEqual(['0df0931b-e251-4f2e-8719-4effda3627bb'], sot.target) - self.assertEqual(['READY'], sot.status) - self.assertEqual(5, sot.limit) - self.assertEqual('f1ed0d50-7651-4599-a8cb-c86e9c7123f6', sot.marker) - self.assertEqual('name:asc', sot.sort) - self.assertFalse(sot.project_safe) - - def 
test_action_list_request_body_default(self):
-        sot = actions.ActionListRequest()
-        sot.obj_set_defaults()
-        self.assertTrue(sot.project_safe)
-
-
-class TestActionGet(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-action'
-    }
-
-    def test_action_get_request(self):
-        sot = actions.ActionGetRequest(**self.body)
-        self.assertEqual('test-action', sot.identity)
-
-
-class TestActionDelete(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-action'
-    }
-
-    def test_action_delete_request(self):
-        sot = actions.ActionDeleteRequest(**self.body)
-        self.assertEqual('test-action', sot.identity)
-
-
-class TestActionUpdate(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-action',
-        'status': 'CANCELLED'
-    }
-
-    def test_action_update_request(self):
-        sot = actions.ActionUpdateRequest(**self.body)
-        self.assertEqual('test-action', sot.identity)
-        self.assertEqual('CANCELLED', sot.status)
diff --git a/senlin/tests/unit/objects/requests/test_cluster_policies.py b/senlin/tests/unit/objects/requests/test_cluster_policies.py
deleted file mode 100644
index c50ead0ff..000000000
--- a/senlin/tests/unit/objects/requests/test_cluster_policies.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from senlin.objects.requests import cluster_policies as cp
-from senlin.tests.unit.common import base as test_base
-
-
-class TestClusterPolicyList(test_base.SenlinTestCase):
-
-    params = {
-        'identity': 'fake_cluster',
-        'policy_name': 'fake_name',
-        'policy_type': 'fake_type',
-        'enabled': True,
-        'sort': 'enabled'
-    }
-
-    def test_cluster_policy_list(self):
-        data = self.params
-
-        sot = cp.ClusterPolicyListRequest(**data)
-        self.assertEqual('fake_cluster', sot.identity)
-        self.assertEqual('fake_name', sot.policy_name)
-        self.assertEqual('fake_type', sot.policy_type)
-        self.assertTrue(sot.enabled)
-        self.assertEqual('enabled', sot.sort)
-
-    def test_cluster_policy_list_invalid_param(self):
-        data = copy.deepcopy(self.params)
-        data['enabled'] = 'bad'
-        ex = self.assertRaises(ValueError, cp.ClusterPolicyListRequest,
-                               **data)
-        self.assertEqual("Unrecognized value 'bad', acceptable values are: "
-                         "'0', '1', 'f', 'false', 'n', 'no', 'off', 'on', "
-                         "'t', 'true', 'y', 'yes'", str(ex))
-
-    def test_cluster_policy_list_primitive(self):
-        data = self.params
-
-        sot = cp.ClusterPolicyListRequest(**data)
-        res = sot.obj_to_primitive()
-
-        self.assertIn('identity', res['senlin_object.changes'])
-        self.assertIn('sort', res['senlin_object.changes'])
-        self.assertIn('enabled', res['senlin_object.changes'])
-        self.assertIn('policy_name', res['senlin_object.changes'])
-        self.assertIn('policy_type', res['senlin_object.changes'])
-
-        self.assertEqual('1.0', res['senlin_object.version'])
-        self.assertEqual('senlin', res['senlin_object.namespace'])
-        self.assertEqual('ClusterPolicyListRequest', res['senlin_object.name'])
-
-        param = res['senlin_object.data']
-        self.assertEqual('fake_cluster', param['identity'])
-        self.assertEqual('enabled', param['sort'])
-        
self.assertEqual('fake_name', param['policy_name']) - self.assertEqual('fake_type', param['policy_type']) - self.assertTrue(param['enabled']) - - -class TestClusterPolicyGet(test_base.SenlinTestCase): - - def test_cluster_policy_get(self): - sot = cp.ClusterPolicyGetRequest(identity='cid', policy_id='pid') - - self.assertEqual('cid', sot.identity) - self.assertEqual('pid', sot.policy_id) - - res = sot.obj_to_primitive() - - self.assertIn('identity', res['senlin_object.changes']) - self.assertIn('policy_id', res['senlin_object.changes']) - - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - self.assertEqual('ClusterPolicyGetRequest', res['senlin_object.name']) - - data = res['senlin_object.data'] - self.assertEqual('cid', data['identity']) - self.assertEqual('pid', data['policy_id']) - - def test_cluster_policy_get_invalid_params(self): - - ex = self.assertRaises(ValueError, cp.ClusterPolicyGetRequest, - identity='cid', policy_id=['bad']) - self.assertEqual("A string is required in field policy_id, not a list", - str(ex)) diff --git a/senlin/tests/unit/objects/requests/test_clusters.py b/senlin/tests/unit/objects/requests/test_clusters.py deleted file mode 100644 index 24fbbf79a..000000000 --- a/senlin/tests/unit/objects/requests/test_clusters.py +++ /dev/null @@ -1,587 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import copy - -from oslo_config import cfg - -from senlin.common import consts -from senlin.objects.requests import clusters -from senlin.tests.unit.common import base as test_base - -CONF = cfg.CONF -CONF.import_opt('default_action_timeout', 'senlin.conf') -CONF.import_opt('max_nodes_per_cluster', 'senlin.conf') - - -class TestClusterCreate(test_base.SenlinTestCase): - - body = { - 'name': 'test-cluster', - 'profile_id': 'test-profile', - } - - def test_cluster_create_request_body(self): - sot = clusters.ClusterCreateRequestBody(**self.body) - self.assertEqual('test-cluster', sot.name) - self.assertEqual('test-profile', sot.profile_id) - - self.assertFalse(sot.obj_attr_is_set('min_size')) - self.assertFalse(sot.obj_attr_is_set('max_size')) - self.assertFalse(sot.obj_attr_is_set('desired_capacity')) - self.assertFalse(sot.obj_attr_is_set('metadata')) - self.assertFalse(sot.obj_attr_is_set('timeout')) - self.assertFalse(sot.obj_attr_is_set('config')) - - sot.obj_set_defaults() - - self.assertTrue(sot.obj_attr_is_set('min_size')) - self.assertEqual(consts.CLUSTER_DEFAULT_MIN_SIZE, sot.min_size) - self.assertEqual(consts.CLUSTER_DEFAULT_MAX_SIZE, sot.max_size) - self.assertEqual({}, sot.metadata) - self.assertEqual(CONF.default_action_timeout, sot.timeout) - self.assertEqual({}, sot.config) - - def test_cluster_create_request_body_full(self): - body = copy.deepcopy(self.body) - body['min_size'] = 1 - body['max_size'] = 10 - body['desired_capacity'] = 4 - body['metadata'] = {'foo': 'bar'} - body['timeout'] = 121 - body['config'] = {'k1': 'v1'} - - sot = clusters.ClusterCreateRequestBody(**body) - - self.assertEqual('test-cluster', sot.name) - self.assertEqual('test-profile', sot.profile_id) - self.assertEqual(1, sot.min_size) - self.assertEqual(10, sot.max_size) - self.assertEqual(4, sot.desired_capacity) - self.assertEqual({'foo': 'bar'}, sot.metadata) - self.assertEqual(121, sot.timeout) - self.assertEqual({'k1': 'v1'}, sot.config) - - def test_request_body_to_primitive(self): - sot = clusters.ClusterCreateRequestBody(**self.body) - res = sot.obj_to_primitive() - self.assertEqual( - { - 'name': u'test-cluster', - 'profile_id': u'test-profile' - }, - res['senlin_object.data'] - ) - self.assertEqual('ClusterCreateRequestBody', - res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.1', res['senlin_object.version']) - self.assertIn('profile_id', res['senlin_object.changes']) - self.assertIn('name', res['senlin_object.changes']) - - def test_request_to_primitive(self): - body = clusters.ClusterCreateRequestBody(**self.body) - request = {'cluster': body} - sot = clusters.ClusterCreateRequest(**request) - self.assertIsInstance(sot.cluster, clusters.ClusterCreateRequestBody) - - self.assertEqual('test-cluster', sot.cluster.name) - self.assertEqual('test-profile', sot.cluster.profile_id) - - res = sot.obj_to_primitive() - self.assertEqual(['cluster'], res['senlin_object.changes']) - self.assertEqual('ClusterCreateRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - data = res['senlin_object.data']['cluster'] - self.assertIn('profile_id', data['senlin_object.changes']) - self.assertIn('name', data['senlin_object.changes']) - self.assertEqual('ClusterCreateRequestBody', - data['senlin_object.name']) - self.assertEqual('senlin', data['senlin_object.namespace']) - self.assertEqual('1.1', data['senlin_object.version']) - self.assertEqual( - 
{'name': u'test-cluster', 'profile_id': u'test-profile'}, - data['senlin_object.data'] - ) - - def test_init_body_err_min_size_too_low(self): - body = copy.deepcopy(self.body) - body['min_size'] = -1 - - ex = self.assertRaises(ValueError, - clusters.ClusterCreateRequestBody, - **body) - - self.assertEqual("The value for the min_size field must be greater " - "than or equal to 0.", - str(ex)) - - def test_init_body_err_min_size_too_high(self): - body = copy.deepcopy(self.body) - body['min_size'] = CONF.max_nodes_per_cluster + 1 - - ex = self.assertRaises(ValueError, - clusters.ClusterCreateRequestBody, - **body) - - self.assertEqual("The value for the min_size field must be less than " - "or equal to %s." % CONF.max_nodes_per_cluster, - str(ex)) - - def test_init_body_err_max_size_too_low(self): - body = copy.deepcopy(self.body) - body['max_size'] = -2 - - ex = self.assertRaises(ValueError, - clusters.ClusterCreateRequestBody, - **body) - - self.assertEqual("The value for the max_size field must be greater " - "than or equal to -1.", - str(ex)) - - def test_init_body_err_max_size_too_high(self): - body = copy.deepcopy(self.body) - body['max_size'] = CONF.max_nodes_per_cluster + 1 - - ex = self.assertRaises(ValueError, - clusters.ClusterCreateRequestBody, - **body) - - self.assertEqual("The value for the max_size field must be less than " - "or equal to %s." % CONF.max_nodes_per_cluster, - str(ex)) - - def test_init_body_err_desired_too_low(self): - body = copy.deepcopy(self.body) - body['desired_capacity'] = -1 - - ex = self.assertRaises(ValueError, - clusters.ClusterCreateRequestBody, - **body) - - self.assertEqual("The value for the desired_capacity field must be " - "greater than or equal to 0.", - str(ex)) - - def test_init_body_err_desired_too_high(self): - body = copy.deepcopy(self.body) - body['desired_capacity'] = CONF.max_nodes_per_cluster + 1 - - ex = self.assertRaises(ValueError, - clusters.ClusterCreateRequestBody, - **body) - - self.assertEqual(("The value for the desired_capacity field must be " - "less than or equal to %s." 
% - CONF.max_nodes_per_cluster), - str(ex)) - - def test_init_body_err_timeout_negative(self): - body = copy.deepcopy(self.body) - body['timeout'] = -1 - - ex = self.assertRaises(ValueError, - clusters.ClusterCreateRequestBody, - **body) - - self.assertEqual("Value must be >= 0 for field 'timeout'.", - str(ex)) - - -class TestClusterList(test_base.SenlinTestCase): - - params = { - 'project_safe': True, - } - - def test_init(self): - sot = clusters.ClusterListRequest() - - self.assertFalse(sot.obj_attr_is_set('project_safe')) - self.assertFalse(sot.obj_attr_is_set('name')) - self.assertFalse(sot.obj_attr_is_set('status')) - self.assertFalse(sot.obj_attr_is_set('limit')) - self.assertFalse(sot.obj_attr_is_set('marker')) - self.assertFalse(sot.obj_attr_is_set('sort')) - - sot.obj_set_defaults() - - self.assertTrue(sot.project_safe) - self.assertFalse(sot.obj_attr_is_set('name')) - self.assertFalse(sot.obj_attr_is_set('status')) - self.assertFalse(sot.obj_attr_is_set('limit')) - self.assertFalse(sot.obj_attr_is_set('marker')) - self.assertIsNone(sot.sort) - - def test_cluster_list_request_body_full(self): - params = { - 'name': ['name1'], - 'status': ['ACTIVE'], - 'limit': '4', # a test of having string as limit - 'marker': '09013587-c1e9-4c98-9c0c-d357004363e1', - 'sort': 'name:asc', - 'project_safe': 'False', # a test of flexible boolean - } - sot = clusters.ClusterListRequest(**params) - self.assertEqual(['name1'], sot.name) - self.assertEqual(['ACTIVE'], sot.status) - self.assertEqual(4, sot.limit) - self.assertEqual('09013587-c1e9-4c98-9c0c-d357004363e1', sot.marker) - self.assertEqual('name:asc', sot.sort) - self.assertFalse(sot.project_safe) - - -class TestClusterGet(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterGetRequest(identity='foo') - - self.assertEqual('foo', sot.identity) - - -class TestClusterUpdate(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterUpdateRequest(identity='foo') - - self.assertEqual('foo', sot.identity) - self.assertFalse(sot.obj_attr_is_set('name')) - self.assertFalse(sot.obj_attr_is_set('profile_id')) - self.assertFalse(sot.obj_attr_is_set('metadata')) - self.assertFalse(sot.obj_attr_is_set('timeout')) - self.assertFalse(sot.obj_attr_is_set('profile_only')) - self.assertFalse(sot.obj_attr_is_set('config')) - - def test_init_with_params(self): - sot = clusters.ClusterUpdateRequest(identity='foo', name='new-name', - profile_id='new-profile', - metadata={'newkey': 'newvalue'}, - timeout=4567, profile_only=True, - config={'foo': 'bar'}) - - self.assertEqual('foo', sot.identity) - self.assertEqual('new-name', sot.name) - self.assertEqual('new-profile', sot.profile_id) - self.assertEqual({'newkey': 'newvalue'}, sot.metadata) - self.assertEqual(4567, sot.timeout) - self.assertTrue(sot.profile_only) - self.assertEqual({'foo': 'bar'}, sot.config) - - -class TestClusterAddNodes(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterAddNodesRequest(identity='foo', nodes=['abc']) - - self.assertEqual('foo', sot.identity) - self.assertEqual(['abc'], sot.nodes) - - def test_init_failed(self): - ex = self.assertRaises(ValueError, - clusters.ClusterAddNodesRequest, - identity='foo', nodes=[]) - self.assertEqual("Value for 'nodes' must have at least 1 item(s).", - str(ex)) - - -class TestClusterDelNodes(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterDelNodesRequest(identity='foo', nodes=['abc'], - destroy_after_deletion=True) - - self.assertEqual('foo', sot.identity) - 
self.assertEqual(['abc'], sot.nodes) - self.assertTrue(sot.destroy_after_deletion) - - def test_init_without_destroy(self): - sot = clusters.ClusterDelNodesRequest(identity='foo', nodes=['abc'], - destroy_after_deletion=False) - - self.assertEqual('foo', sot.identity) - self.assertEqual(['abc'], sot.nodes) - self.assertFalse(sot.destroy_after_deletion) - - def test_init_failed(self): - ex = self.assertRaises(ValueError, - clusters.ClusterDelNodesRequest, - identity='foo', nodes=[]) - self.assertEqual("Value for 'nodes' must have at least 1 item(s).", - str(ex)) - - -class TestClusterResize(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterResizeRequest(identity='foo') - - self.assertEqual('foo', sot.identity) - self.assertFalse(sot.obj_attr_is_set('adjustment_type')) - self.assertFalse(sot.obj_attr_is_set('number')) - self.assertFalse(sot.obj_attr_is_set('min_size')) - self.assertFalse(sot.obj_attr_is_set('max_size')) - self.assertFalse(sot.obj_attr_is_set('min_step')) - self.assertFalse(sot.obj_attr_is_set('strict')) - - def test_init_with_params(self): - sot = clusters.ClusterResizeRequest(identity='foo', - adjustment_type='EXACT_CAPACITY', - number=100, - min_size=10, - max_size=100, - min_step=1, - strict=False) - - self.assertEqual('foo', sot.identity) - self.assertEqual('EXACT_CAPACITY', sot.adjustment_type) - self.assertEqual(100, sot.number) - self.assertEqual(10, sot.min_size) - self.assertEqual(100, sot.max_size) - self.assertEqual(1, sot.min_step) - self.assertFalse(sot.strict) - - def test_init_failed_type(self): - ex = self.assertRaises(ValueError, - clusters.ClusterResizeRequest, - identity='foo', adjustment_type='BOGUS') - self.assertEqual("Value 'BOGUS' is not acceptable for field " - "'adjustment_type'.", - str(ex)) - - def test_init_failed_number(self): - ex = self.assertRaises(ValueError, - clusters.ClusterResizeRequest, - identity='foo', number='foo') - self.assertIn("could not convert string to float", str(ex)) - - def test_init_failed_min_size(self): - ex = self.assertRaises(ValueError, - clusters.ClusterResizeRequest, - identity='foo', min_size=-1) - self.assertEqual("The value for the min_size field must be greater " - "than or equal to 0.", - str(ex)) - - def test_init_failed_max_size(self): - ex = self.assertRaises(ValueError, - clusters.ClusterResizeRequest, - identity='foo', max_size=-2) - self.assertEqual("The value for the max_size field must be greater " - "than or equal to -1.", - str(ex)) - - def test_init_failed_min_step(self): - ex = self.assertRaises(ValueError, - clusters.ClusterResizeRequest, - identity='foo', min_step=-3) - self.assertEqual("Value must be >= 0 for field 'min_step'.", - str(ex)) - - def test_init_failed_strict(self): - ex = self.assertRaises(ValueError, - clusters.ClusterResizeRequest, - identity='foo', strict='fake') - self.assertIn("Unrecognized value 'fake'", str(ex)) - - -class TestClusterScaleIn(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterScaleInRequest(identity='foo', count=5) - - self.assertEqual('foo', sot.identity) - self.assertEqual(5, sot.count) - - def test_init_failed(self): - ex = self.assertRaises(ValueError, - clusters.ClusterScaleInRequest, - identity='foo', count=-1) - self.assertEqual("Value must be >= 0 for field 'count'.", - str(ex)) - - -class TestClusterScaleOut(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterScaleOutRequest(identity='foo', count=5) - - self.assertEqual('foo', sot.identity) - self.assertEqual(5, sot.count) - - 
def test_init_failed(self): - ex = self.assertRaises(ValueError, - clusters.ClusterScaleOutRequest, - identity='foo', count=-1) - self.assertEqual("Value must be >= 0 for field 'count'.", - str(ex)) - - -class TestClusterAttachPolicy(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterAttachPolicyRequest(identity='foo', - policy_id='bar') - - self.assertEqual('foo', sot.identity) - self.assertEqual('bar', sot.policy_id) - self.assertFalse(sot.obj_attr_is_set('enabled')) - - sot.obj_set_defaults() - self.assertTrue(sot.obj_attr_is_set('enabled')) - self.assertTrue(sot.enabled) - - def test_init_failed(self): - ex = self.assertRaises(ValueError, - clusters.ClusterAttachPolicyRequest, - identity='foo', enabled='Bogus') - - self.assertIn("Unrecognized value 'Bogus'", str(ex)) - - -class TestClusterUpdatePolicy(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterUpdatePolicyRequest(identity='foo', - policy_id='bar') - - self.assertEqual('foo', sot.identity) - self.assertEqual('bar', sot.policy_id) - self.assertFalse(sot.obj_attr_is_set('enabled')) - - sot.obj_set_defaults() - self.assertTrue(sot.obj_attr_is_set('enabled')) - self.assertTrue(sot.enabled) - - def test_init_failed(self): - ex = self.assertRaises(ValueError, - clusters.ClusterUpdatePolicyRequest, - identity='foo', enabled='Bogus') - - self.assertIn("Unrecognized value 'Bogus'", str(ex)) - - -class TestClusterDetachPolicy(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterDetachPolicyRequest(identity='foo', - policy_id='bar') - self.assertEqual('foo', sot.identity) - self.assertEqual('bar', sot.policy_id) - - -class TestClusterCheck(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterCheckRequest(identity='cluster', - params={'foo': 'bar'}) - self.assertEqual('cluster', sot.identity) - self.assertEqual({'foo': 'bar'}, sot.params) - - def test_init_partial(self): - sot = clusters.ClusterCheckRequest(identity='cluster') - self.assertEqual('cluster', sot.identity) - self.assertFalse(sot.obj_attr_is_set('params')) - - -class TestClusterRecover(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterRecoverRequest(identity='cluster', - params={'foo': 'bar'}) - self.assertEqual('cluster', sot.identity) - self.assertEqual({'foo': 'bar'}, sot.params) - - def test_init_partial(self): - sot = clusters.ClusterRecoverRequest(identity='cluster') - self.assertEqual('cluster', sot.identity) - self.assertFalse(sot.obj_attr_is_set('params')) - - -class TestClusterReplaceNodes(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterReplaceNodesRequest( - identity='foo', nodes={'old1': 'new1', 'old2': 'new2'}) - - self.assertEqual('foo', sot.identity) - self.assertEqual({'old1': 'new1', 'old2': 'new2'}, sot.nodes) - - def test_init_missing_value(self): - ex = self.assertRaises(ValueError, - clusters.ClusterReplaceNodesRequest, - identity='foo', - nodes={'old1': None, 'old2': 'new2'}) - - self.assertEqual("Field `nodes[old1]' cannot be None", - str(ex)) - - def test_init_duplicated_nodes(self): - ex = self.assertRaises(ValueError, - clusters.ClusterReplaceNodesRequest, - identity='foo', - nodes={'old1': 'new2', 'old2': 'new2'}) - - self.assertEqual("Map contains duplicated values", - str(ex)) - - -class TestClusterCollect(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterCollectRequest(identity='foo', - path='path/to/attr') - - self.assertEqual('foo', sot.identity) - 
self.assertEqual('path/to/attr', sot.path) - - -class TestClusterOperation(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterOperationRequest( - identity='foo', filters={'role': 'slave'}, - operation='dance', params={'style': 'tango'}) - - self.assertEqual('foo', sot.identity) - self.assertEqual('dance', sot.operation) - self.assertEqual({'role': 'slave'}, sot.filters) - self.assertEqual({'style': 'tango'}, sot.params) - - def test_init_minimal(self): - sot = clusters.ClusterOperationRequest(identity='foo', - operation='dance') - - self.assertEqual('foo', sot.identity) - self.assertEqual('dance', sot.operation) - self.assertFalse(sot.obj_attr_is_set('filters')) - self.assertFalse(sot.obj_attr_is_set('params')) - sot.obj_set_defaults() - self.assertEqual({}, sot.filters) - self.assertEqual({}, sot.params) - - -class TestClusterDelete(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterDeleteRequest(identity='foo') - self.assertEqual('foo', sot.identity) - - -class TestClusterCompleteLifecycle(test_base.SenlinTestCase): - - def test_init(self): - sot = clusters.ClusterCompleteLifecycleRequest( - identity='foo', lifecycle_action_token='abc') - - self.assertEqual('foo', sot.identity) - self.assertEqual('abc', sot.lifecycle_action_token) diff --git a/senlin/tests/unit/objects/requests/test_credentials.py b/senlin/tests/unit/objects/requests/test_credentials.py deleted file mode 100644 index 51cd75b3f..000000000 --- a/senlin/tests/unit/objects/requests/test_credentials.py +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import copy - -from senlin.objects.requests import credentials -from senlin.tests.unit.common import base as test_base - - -class TestCredentialCreate(test_base.SenlinTestCase): - - body = { - 'cred': { - 'openstack': { - 'trust': 'f49419fd-e48b-4e8c-a201-30eb4560acf4' - } - } - } - - def test_credential_create_request(self): - sot = credentials.CredentialCreateRequest(**self.body) - self.assertEqual(self.body['cred'], sot.cred) - sot.obj_set_defaults() - self.assertEqual({}, sot.attrs) - - def test_credential_create_request_full(self): - body = copy.deepcopy(self.body) - body['attrs'] = {'foo': 'bar'} - sot = credentials.CredentialCreateRequest(**body) - self.assertEqual(body['cred'], sot.cred) - self.assertEqual(body['attrs'], sot.attrs) - - -class TestCredentialGet(test_base.SenlinTestCase): - - body = { - 'user': 'test-user', - 'project': 'test-project' - } - - def test_credential_get_request(self): - sot = credentials.CredentialGetRequest(**self.body) - self.assertEqual('test-user', sot.user) - self.assertEqual('test-project', sot.project) - sot.obj_set_defaults() - self.assertEqual({}, sot.query) - - def test_credential_get_request_full(self): - body = copy.deepcopy(self.body) - body['query'] = {'foo': 'bar'} - sot = credentials.CredentialGetRequest(**body) - self.assertEqual('test-user', sot.user) - self.assertEqual('test-project', sot.project) - self.assertEqual({'foo': 'bar'}, sot.query) - - -class TestCredentialUpdate(test_base.SenlinTestCase): - - body = { - 'cred': { - 'openstack': { - 'trust': 'f49419fd-e48b-4e8c-a201-30eb4560acf4' - } - } - } - - def test_credential_update_request(self): - sot = credentials.CredentialUpdateRequest(**self.body) - self.assertEqual(self.body['cred'], sot.cred) - sot.obj_set_defaults() - self.assertEqual({}, sot.attrs) diff --git a/senlin/tests/unit/objects/requests/test_events.py b/senlin/tests/unit/objects/requests/test_events.py deleted file mode 100644 index fd11b3a5b..000000000 --- a/senlin/tests/unit/objects/requests/test_events.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-from senlin.objects.requests import events
-from senlin.tests.unit.common import base as test_base
-
-
-class TestEventList(test_base.SenlinTestCase):
-
-    def test_event_list_request_body_full(self):
-        params = {
-            'oid': ['f23ff00c-ec4f-412d-bd42-7f6e209819cb'],
-            'otype': ['NODE'],
-            'oname': ['mynode1'],
-            'action': ['NODE_CREATE'],
-            'cluster_id': ['f23ff00c-ec4f-412d-bd42-7f6e209819cb'],
-            'level': ['ERROR'],
-            'limit': 5,
-            'marker': '98625fd0-b120-416c-a978-2fbe28c46820',
-            'sort': 'timestamp:asc',
-            'project_safe': False,
-        }
-        sot = events.EventListRequest(**params)
-        self.assertEqual(['f23ff00c-ec4f-412d-bd42-7f6e209819cb'], sot.oid)
-        self.assertEqual(['NODE'], sot.otype)
-        self.assertEqual(['mynode1'], sot.oname)
-        self.assertEqual(['NODE_CREATE'], sot.action)
-        self.assertEqual(['f23ff00c-ec4f-412d-bd42-7f6e209819cb'],
-                         sot.cluster_id)
-        self.assertEqual(['ERROR'], sot.level)
-        self.assertEqual(5, sot.limit)
-        self.assertEqual('98625fd0-b120-416c-a978-2fbe28c46820', sot.marker)
-        self.assertEqual('timestamp:asc', sot.sort)
-        self.assertFalse(sot.project_safe)
-
-    def test_event_list_request_body_default(self):
-        sot = events.EventListRequest()
-        sot.obj_set_defaults()
-        self.assertTrue(sot.project_safe)
-
-
-class TestEventGet(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-event'
-    }
-
-    def test_event_get_request(self):
-        sot = events.EventGetRequest(**self.body)
-        self.assertEqual('test-event', sot.identity)
diff --git a/senlin/tests/unit/objects/requests/test_nodes.py b/senlin/tests/unit/objects/requests/test_nodes.py
deleted file mode 100644
index 48f12e922..000000000
--- a/senlin/tests/unit/objects/requests/test_nodes.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import copy - -from oslo_config import cfg - -from senlin.objects.requests import nodes -from senlin.tests.unit.common import base as test_base - -CONF = cfg.CONF -CONF.import_opt('default_action_timeout', 'senlin.conf') - - -class TestNodeCreate(test_base.SenlinTestCase): - - body = { - 'name': 'test-node', - 'profile_id': 'test-profile', - } - - def test_node_create_request_body(self): - sot = nodes.NodeCreateRequestBody(**self.body) - self.assertEqual('test-node', sot.name) - self.assertEqual('test-profile', sot.profile_id) - - sot.obj_set_defaults() - - self.assertEqual('', sot.cluster_id) - self.assertEqual('', sot.role) - self.assertEqual({}, sot.metadata) - - def test_node_create_request_body_full(self): - body = copy.deepcopy(self.body) - body['role'] = 'master' - body['cluster_id'] = 'cluster-01' - body['metadata'] = {'foo': 'bar'} - sot = nodes.NodeCreateRequestBody(**body) - self.assertEqual('test-node', sot.name) - self.assertEqual('test-profile', sot.profile_id) - self.assertEqual('cluster-01', sot.cluster_id) - self.assertEqual('master', sot.role) - self.assertEqual({'foo': 'bar'}, sot.metadata) - - def test_request_body_to_primitive(self): - sot = nodes.NodeCreateRequestBody(**self.body) - res = sot.obj_to_primitive() - self.assertEqual( - { - 'name': u'test-node', - 'profile_id': u'test-profile' - }, - res['senlin_object.data'] - ) - self.assertEqual('NodeCreateRequestBody', - res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - self.assertIn('profile_id', res['senlin_object.changes']) - self.assertIn('name', res['senlin_object.changes']) - - def test_request_to_primitive(self): - body = nodes.NodeCreateRequestBody(**self.body) - request = {'node': body} - sot = nodes.NodeCreateRequest(**request) - self.assertIsInstance(sot.node, nodes.NodeCreateRequestBody) - - self.assertEqual('test-node', sot.node.name) - self.assertEqual('test-profile', sot.node.profile_id) - - res = sot.obj_to_primitive() - self.assertEqual(['node'], res['senlin_object.changes']) - self.assertEqual('NodeCreateRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - data = res['senlin_object.data']['node'] - self.assertIn('profile_id', data['senlin_object.changes']) - self.assertIn('name', data['senlin_object.changes']) - self.assertEqual('NodeCreateRequestBody', - data['senlin_object.name']) - self.assertEqual('senlin', data['senlin_object.namespace']) - self.assertEqual('1.0', data['senlin_object.version']) - self.assertEqual( - {'name': u'test-node', 'profile_id': u'test-profile'}, - data['senlin_object.data'] - ) - - -class TestNodeList(test_base.SenlinTestCase): - - def test_node_list_request_body_full(self): - params = { - 'cluster_id': '8c3c9af7-d768-4c5a-a21e-5261b22d749d', - 'name': ['node01'], - 'status': ['ACTIVE'], - 'limit': 3, - 'marker': 'f1ed0d50-7651-4599-a8cb-c86e9c7123f5', - 'sort': 'name:asc', - 'project_safe': False, - } - sot = nodes.NodeListRequest(**params) - self.assertEqual('8c3c9af7-d768-4c5a-a21e-5261b22d749d', - sot.cluster_id) - self.assertEqual(['node01'], sot.name) - self.assertEqual(['ACTIVE'], sot.status) - self.assertEqual(3, sot.limit) - self.assertEqual('f1ed0d50-7651-4599-a8cb-c86e9c7123f5', sot.marker) - self.assertEqual('name:asc', sot.sort) - self.assertFalse(sot.project_safe) - - def test_node_list_request_body_default(self): - sot = nodes.NodeListRequest() - 
sot.obj_set_defaults()
-        self.assertTrue(sot.project_safe)
-
-
-class TestNodeGet(test_base.SenlinTestCase):
-
-    def test_node_get_request_full(self):
-        params = {
-            'identity': 'node-001',
-            'show_details': True,
-        }
-        sot = nodes.NodeGetRequest(**params)
-        self.assertEqual('node-001', sot.identity)
-        self.assertTrue(sot.show_details)
-
-    def test_node_get_request_default(self):
-        sot = nodes.NodeGetRequest()
-        sot.obj_set_defaults()
-        self.assertFalse(sot.show_details)
-
-
-class TestNodeUpdate(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-node',
-        'name': 'test-node-newname',
-        'profile_id': 'test-profile',
-        'metadata': {'foo': 'bar'},
-        'role': 'master'
-    }
-
-    def test_node_update_request(self):
-        sot = nodes.NodeUpdateRequest(**self.body)
-        self.assertEqual('test-node', sot.identity)
-        self.assertEqual('test-node-newname', sot.name)
-        self.assertEqual('test-profile', sot.profile_id)
-        self.assertEqual('master', sot.role)
-        self.assertEqual({'foo': 'bar'}, sot.metadata)
-
-
-class TestNodeDelete(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-node'
-    }
-
-    def test_node_delete_request(self):
-        sot = nodes.NodeDeleteRequest(**self.body)
-        self.assertEqual('test-node', sot.identity)
-
-
-class TestNodeCheck(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-node',
-        'params': {'foo': 'bar'},
-    }
-
-    def test_node_check_request(self):
-        sot = nodes.NodeCheckRequest(**self.body)
-        self.assertEqual({'foo': 'bar'}, sot.params)
-
-
-class TestNodeRecover(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-node',
-        'params': {'foo': 'bar'},
-    }
-
-    def test_node_recover_request(self):
-        sot = nodes.NodeRecoverRequest(**self.body)
-        self.assertEqual({'foo': 'bar'}, sot.params)
-
-
-class TestNodeOperation(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-node',
-        'operation': 'dance',
-        'params': {'foo': 'bar'},
-    }
-
-    def test_node_operation_request(self):
-        sot = nodes.NodeOperationRequest(**self.body)
-        self.assertEqual('test-node', sot.identity)
-        self.assertEqual('dance', sot.operation)
-        self.assertEqual({'foo': 'bar'}, sot.params)
-
-
-class TestNodeAdopt(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-node',
-        'type': 'test-type',
-        'name': 'test-name',
-        'cluster': 'test-cluster',
-        'role': 'test-role',
-        'metadata': {'key': 'value'},
-        'overrides': {'foo': 'bar'},
-        'snapshot': True
-    }
-
-    def test_node_adopt_request(self):
-        sot = nodes.NodeAdoptRequest(**self.body)
-        self.assertEqual('test-node', sot.identity)
-        self.assertEqual('test-type', sot.type)
-        self.assertEqual('test-name', sot.name)
-        self.assertEqual('test-cluster', sot.cluster)
-        self.assertEqual('test-role', sot.role)
-        self.assertEqual({'key': 'value'}, sot.metadata)
-        self.assertEqual({'foo': 'bar'}, sot.overrides)
-        self.assertTrue(sot.snapshot)
-
-
-class TestNodeAdoptPreview(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-node',
-        'type': 'test-type',
-        'overrides': {'foo': 'bar'},
-        'snapshot': True
-    }
-
-    def test_node_adopt_preview_request(self):
-        sot = nodes.NodeAdoptPreviewRequest(**self.body)
-        self.assertEqual('test-node', sot.identity)
-        self.assertEqual('test-type', sot.type)
-        self.assertEqual({'foo': 'bar'}, sot.overrides)
-        self.assertTrue(sot.snapshot)
diff --git a/senlin/tests/unit/objects/requests/test_policies.py b/senlin/tests/unit/objects/requests/test_policies.py
deleted file mode 100644
index 2a63d0fe2..000000000
--- a/senlin/tests/unit/objects/requests/test_policies.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# Licensed under
the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import copy - -from oslo_serialization import jsonutils -from senlin.objects.requests import policies -from senlin.tests.unit.common import base as test_base - - -class TestPolicyList(test_base.SenlinTestCase): - - def test_policy_list_request_body_full(self): - params = { - 'name': ['policy1'], - 'type': ['senlin.policy.scaling-1.0'], - 'limit': 2, - 'marker': 'd6901ce0-1403-4b9c-abf5-25c59cf79823', - 'sort': 'name:asc', - 'project_safe': False - } - sot = policies.PolicyListRequest(**params) - self.assertEqual(['policy1'], sot.name) - self.assertEqual(['senlin.policy.scaling-1.0'], sot.type) - self.assertEqual(2, sot.limit) - self.assertEqual('d6901ce0-1403-4b9c-abf5-25c59cf79823', sot.marker) - self.assertEqual('name:asc', sot.sort) - self.assertFalse(sot.project_safe) - - -class TestPolicyCreate(test_base.SenlinTestCase): - - spec = { - "properties": { - "adjustment": { - "min_step": 1, - "number": 1, - "type": "CHANGE_IN_CAPACITY" - }, - "event": "CLUSTER_SCALE_IN" - }, - "type": "senlin.policy.scaling", - "version": "1.0" - } - - def test_policy_create_body(self): - spec = copy.deepcopy(self.spec) - sot = policies.PolicyCreateRequestBody(name='foo', spec=spec) - self.assertEqual('foo', sot.name) - self.assertEqual('senlin.policy.scaling', sot.spec['type']) - self.assertEqual('1.0', sot.spec['version']) - - def test_policy_create_request(self): - spec = copy.deepcopy(self.spec) - policy = policies.PolicyCreateRequestBody(name='foo', spec=spec) - sot = policies.PolicyCreateRequest(policy=policy) - - self.assertIsInstance(sot.policy, policies.PolicyCreateRequestBody) - - def test_request_body_to_primitive(self): - spec = copy.deepcopy(self.spec) - sot = policies.PolicyCreateRequestBody(name='foo', spec=spec) - self.assertEqual('foo', sot.name) - - res = sot.obj_to_primitive() - # request body - self.assertEqual('PolicyCreateRequestBody', res['senlin_object.name']) - self.assertEqual('1.0', res['senlin_object.version']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertIn('name', res['senlin_object.changes']) - self.assertIn('spec', res['senlin_object.changes']) - # spec - data = res['senlin_object.data'] - self.assertEqual(u'foo', data['name']) - spec_data = jsonutils.loads(data['spec']) - self.assertEqual('senlin.policy.scaling', spec_data['type']) - self.assertEqual('1.0', spec_data['version']) - - def test_request_to_primitive(self): - spec = copy.deepcopy(self.spec) - body = policies.PolicyCreateRequestBody(name='foo', spec=spec) - sot = policies.PolicyCreateRequest(policy=body) - - self.assertIsInstance(sot.policy, policies.PolicyCreateRequestBody) - self.assertEqual('foo', sot.policy.name) - - res = sot.obj_to_primitive() - self.assertIn('policy', res['senlin_object.changes']) - self.assertEqual('PolicyCreateRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - data = res['senlin_object.data']['policy'] - 
self.assertEqual('PolicyCreateRequestBody', data['senlin_object.name'])
-        self.assertEqual('senlin', data['senlin_object.namespace'])
-        self.assertEqual('1.0', data['senlin_object.version'])
-        self.assertIn('name', data['senlin_object.changes'])
-        self.assertIn('spec', data['senlin_object.changes'])
-
-        pd = data['senlin_object.data']
-        self.assertEqual(u'foo', pd['name'])
-
-        spec_data = jsonutils.loads(pd['spec'])
-        self.assertEqual('senlin.policy.scaling', spec_data['type'])
-        self.assertEqual('1.0', spec_data['version'])
-
-
-class TestPolicyGet(test_base.SenlinTestCase):
-
-    def test_policy_get(self):
-        sot = policies.PolicyGetRequest(identity='foo')
-
-        self.assertEqual('foo', sot.identity)
-
-
-class TestPolicyUpdate(test_base.SenlinTestCase):
-
-    def test_policy_update_body(self):
-        data = {'name': 'foo'}
-        sot = policies.PolicyUpdateRequestBody(**data)
-        self.assertEqual('foo', sot.name)
-
-    def test_policy_update(self):
-        data = {'name': 'foo'}
-        body = policies.PolicyUpdateRequestBody(**data)
-
-        request = {
-            'identity': 'pid',
-            'policy': body
-        }
-        sot = policies.PolicyUpdateRequest(**request)
-        self.assertEqual('pid', sot.identity)
-        self.assertIsInstance(sot.policy, policies.PolicyUpdateRequestBody)
-
-    def test_policy_data_to_primitive(self):
-        data = {'name': 'foo'}
-        sot = policies.PolicyUpdateRequestBody(**data)
-        res = sot.obj_to_primitive()
-
-        self.assertIn('name', res['senlin_object.changes'])
-        self.assertEqual(u'foo', res['senlin_object.data']['name'])
-        self.assertEqual('PolicyUpdateRequestBody', res['senlin_object.name'])
-        self.assertEqual('senlin', res['senlin_object.namespace'])
-        self.assertEqual('1.0', res['senlin_object.version'])
-
-    def test_request_to_primitive(self):
-        data = {'name': 'foo'}
-        body = policies.PolicyUpdateRequestBody(**data)
-
-        request = {
-            'identity': 'pid',
-            'policy': body
-        }
-        sot = policies.PolicyUpdateRequest(**request)
-        res = sot.obj_to_primitive()
-
-        self.assertIn('identity', res['senlin_object.changes'])
-        self.assertEqual(u'pid', res['senlin_object.data']['identity'])
-        self.assertEqual('PolicyUpdateRequest', res['senlin_object.name'])
-        self.assertEqual('senlin', res['senlin_object.namespace'])
-        self.assertEqual('1.0', res['senlin_object.version'])
-
-
-class TestPolicyValidate(test_base.SenlinTestCase):
-    spec = {
-        "properties": {
-            "adjustment": {
-                "min_step": 1,
-                "number": 1,
-                "type": "CHANGE_IN_CAPACITY"
-            },
-            "event": "CLUSTER_SCALE_IN"
-        },
-        "type": "senlin.policy.scaling",
-        "version": "1.0"
-    }
-
-    def test_validate_request_body(self):
-        spec = copy.deepcopy(self.spec)
-        body = policies.PolicyValidateRequestBody(spec=spec)
-
-        self.assertEqual(spec['type'], body.spec['type'])
-        self.assertEqual(spec['version'], body.spec['version'])
-
-    def test_validate_request(self):
-        spec = copy.deepcopy(self.spec)
-        body = policies.PolicyValidateRequestBody(spec=spec)
-        policy = policies.PolicyValidateRequest(policy=body)
-
-        self.assertIsInstance(
-            policy.policy, policies.PolicyValidateRequestBody)
-
-    def test_request_body_to_primitive(self):
-        spec = copy.deepcopy(self.spec)
-
-        sot = policies.PolicyValidateRequestBody(spec=spec)
-        res = sot.obj_to_primitive()
-
-        self.assertIn('spec', res['senlin_object.changes'])
-        self.assertEqual(
-            'PolicyValidateRequestBody', res['senlin_object.name'])
-        self.assertEqual('senlin', res['senlin_object.namespace'])
-        self.assertEqual('1.0', res['senlin_object.version'])
-
-        pd = res['senlin_object.data']['spec']
-        data = jsonutils.loads(pd)
-
self.assertEqual('senlin.policy.scaling', data['type']) - self.assertEqual('1.0', data['version']) - - def test_request_to_primitive(self): - spec = copy.deepcopy(self.spec) - body = policies.PolicyValidateRequestBody(spec=spec) - policy = policies.PolicyValidateRequest(policy=body) - - res = policy.obj_to_primitive() - - self.assertIn('policy', res['senlin_object.changes']) - self.assertEqual('PolicyValidateRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - body = res['senlin_object.data']['policy'] - - self.assertIn('spec', body['senlin_object.changes']) - self.assertEqual( - 'PolicyValidateRequestBody', body['senlin_object.name']) - self.assertEqual('senlin', body['senlin_object.namespace']) - self.assertEqual('1.0', body['senlin_object.version']) - - pd = body['senlin_object.data']['spec'] - data = jsonutils.loads(pd) - self.assertEqual('senlin.policy.scaling', data['type']) - self.assertEqual('1.0', data['version']) - - -class TestPolicyDelete(test_base.SenlinTestCase): - - def test_policy_delete(self): - sot = policies.PolicyDeleteRequest(identity='foo') - - self.assertEqual('foo', sot.identity) diff --git a/senlin/tests/unit/objects/requests/test_policy_type.py b/senlin/tests/unit/objects/requests/test_policy_type.py deleted file mode 100644 index ca000cae7..000000000 --- a/senlin/tests/unit/objects/requests/test_policy_type.py +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from senlin.objects.requests import policy_type -from senlin.tests.unit.common import base as test_base - - -class TestPolicyTypeGet(test_base.SenlinTestCase): - - def test_policy_type_get(self): - sot = policy_type.PolicyTypeGetRequest(type_name='Fake') - - self.assertEqual('Fake', sot.type_name) - - def test_policy_type_to_primitive(self): - sot = policy_type.PolicyTypeGetRequest(type_name='Fake') - - res = sot.obj_to_primitive() - - self.assertIn('type_name', res['senlin_object.changes']) - self.assertEqual(u'Fake', res['senlin_object.data']['type_name']) - self.assertEqual('PolicyTypeGetRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - -class TestPolicyTypeList(test_base.SenlinTestCase): - - def test_policy_type_list_to_primitive(self): - sot = policy_type.PolicyTypeListRequest() - res = sot.obj_to_primitive() - - self.assertEqual({}, res['senlin_object.data']) - self.assertEqual('PolicyTypeListRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) diff --git a/senlin/tests/unit/objects/requests/test_profile_type.py b/senlin/tests/unit/objects/requests/test_profile_type.py deleted file mode 100644 index 22c00df1e..000000000 --- a/senlin/tests/unit/objects/requests/test_profile_type.py +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from senlin.objects.requests import profile_type as vorp -from senlin.tests.unit.common import base as test_base - - -class TestProfileTypeGet(test_base.SenlinTestCase): - - def test_profile_type_get(self): - sot = vorp.ProfileTypeGetRequest(type_name='foo') - - self.assertEqual('foo', sot.type_name) - - def test_profile_type_to_primitive(self): - sot = vorp.ProfileTypeGetRequest(type_name='foo') - res = sot.obj_to_primitive() - - self.assertIn('type_name', res['senlin_object.changes']) - self.assertEqual(u'foo', res['senlin_object.data']['type_name']) - self.assertEqual('ProfileTypeGetRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - -class TestProfileTypeList(test_base.SenlinTestCase): - - def test_profile_type_list_to_primitive(self): - sot = vorp.ProfileTypeListRequest() - res = sot.obj_to_primitive() - - self.assertEqual({}, res['senlin_object.data']) - self.assertEqual('ProfileTypeListRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - -class TestProfileTypeOpList(test_base.SenlinTestCase): - - def test_profile_type_get(self): - sot = vorp.ProfileTypeOpListRequest(type_name='foo') - - self.assertEqual('foo', sot.type_name) - - def test_profile_type_op_list_to_primitive(self): - sot = vorp.ProfileTypeOpListRequest(type_name='foo') - res = sot.obj_to_primitive() - - self.assertIn('type_name', res['senlin_object.changes']) - self.assertEqual(u'foo', res['senlin_object.data']['type_name']) - self.assertEqual('ProfileTypeOpListRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) diff --git a/senlin/tests/unit/objects/requests/test_profiles.py b/senlin/tests/unit/objects/requests/test_profiles.py deleted file mode 100644 index 60c79daed..000000000 --- a/senlin/tests/unit/objects/requests/test_profiles.py +++ /dev/null @@ -1,264 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import copy - -from oslo_serialization import jsonutils -from senlin.objects.requests import profiles -from senlin.tests.unit.common import base as test_base - - -class TestProfileCreate(test_base.SenlinTestCase): - - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'name': 'FAKE_SERVER_NAME', - 'flavor': 'FAKE_FLAVOR', - 'image': 'FAKE_IMAGE', - 'key_name': 'FAKE_KEYNAME', - 'networks': [{'network': 'FAKE_NET'}], - 'user_data': 'FAKE_USER_DATA' - } - } - - def test_profile_create_body(self): - spec = copy.deepcopy(self.spec) - sot = profiles.ProfileCreateRequestBody(name='foo', spec=spec, - metadata={'x': 'y'}) - self.assertEqual('foo', sot.name) - self.assertEqual({'x': 'y'}, sot.metadata) - self.assertEqual(u'os.nova.server', sot.spec['type']) - self.assertEqual(u'1.0', sot.spec['version']) - - def test_profile_create_request(self): - spec = copy.deepcopy(self.spec) - body = profiles.ProfileCreateRequestBody(name='foo', spec=spec, - metadata={'x': 'y'}) - sot = profiles.ProfileCreateRequest(profile=body) - self.assertIsInstance(sot.profile, profiles.ProfileCreateRequestBody) - - def test_request_body_to_primitive(self): - spec = copy.deepcopy(self.spec) - sot = profiles.ProfileCreateRequestBody(name='test-profile', - spec=spec, - metadata={'x': 'y'}) - self.assertEqual('test-profile', sot.name) - self.assertEqual({'x': 'y'}, sot.metadata) - - res = sot.obj_to_primitive() - # request body - self.assertEqual('ProfileCreateRequestBody', res['senlin_object.name']) - self.assertEqual('1.0', res['senlin_object.version']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertIn('name', res['senlin_object.changes']) - self.assertIn('spec', res['senlin_object.changes']) - self.assertIn('metadata', res['senlin_object.changes']) - # spec - data = res['senlin_object.data'] - self.assertEqual(u'test-profile', data['name']) - self.assertEqual(u'{"x": "y"}', data['metadata']) - # spec data - spec_data = jsonutils.loads(data['spec']) - self.assertEqual(u'os.nova.server', spec_data['type']) - self.assertEqual(u'1.0', spec_data['version']) - - def test_request_to_primitive(self): - spec = copy.deepcopy(self.spec) - body = profiles.ProfileCreateRequestBody(name='test-profile', - spec=spec, - metadata={'x': 'y'}) - sot = profiles.ProfileCreateRequest(profile=body) - self.assertIsInstance(sot.profile, profiles.ProfileCreateRequestBody) - self.assertEqual('test-profile', sot.profile.name) - self.assertEqual({'x': 'y'}, sot.profile.metadata) - - # request - res = sot.obj_to_primitive() - self.assertIn('profile', res['senlin_object.changes']) - self.assertEqual('ProfileCreateRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - # request body - data = res['senlin_object.data']['profile'] - self.assertEqual('ProfileCreateRequestBody', - data['senlin_object.name']) - self.assertEqual('senlin', data['senlin_object.namespace']) - self.assertEqual('1.0', data['senlin_object.version']) - self.assertIn('name', data['senlin_object.changes']) - self.assertIn('spec', data['senlin_object.changes']) - self.assertIn('metadata', data['senlin_object.changes']) - - # spec - pd = data['senlin_object.data'] - self.assertEqual(u'test-profile', pd['name']) - - spec_data = jsonutils.loads(pd['spec']) - self.assertEqual(u'os.nova.server', spec_data['type']) - self.assertEqual(u'1.0', spec_data['version']) - - -class TestProfileList(test_base.SenlinTestCase): - - def 
test_profile_list_request_body_full(self): - params = { - 'name': ['p1'], - 'type': ['os.nova.server-1.0'], - 'limit': 2, - 'marker': 'd8d7dd1e-afd8-4921-83b2-c4ce73b1cb22', - 'sort': 'name:asc', - 'project_safe': False - } - sot = profiles.ProfileListRequest(**params) - self.assertEqual(['p1'], sot.name) - self.assertEqual(['os.nova.server-1.0'], sot.type) - self.assertEqual(2, sot.limit) - self.assertEqual('d8d7dd1e-afd8-4921-83b2-c4ce73b1cb22', - sot.marker) - self.assertEqual('name:asc', sot.sort) - self.assertFalse(sot.project_safe) - - -class TestProfileGet(test_base.SenlinTestCase): - - def test_profile_get(self): - sot = profiles.ProfileGetRequest(identity='FAKE_ID') - self.assertEqual('FAKE_ID', sot.identity) - - -class TestProfileUpdate(test_base.SenlinTestCase): - - def test_profile_update_body(self): - data = {'name': 'foo', 'metadata': {'aaa': 'bbb'}} - sot = profiles.ProfileUpdateRequestBody(**data) - self.assertEqual('foo', sot.name) - self.assertEqual({'aaa': 'bbb'}, sot.metadata) - - def test_profile_update(self): - data = {'name': 'foo', 'metadata': {'aaa': 'bbb'}} - body = profiles.ProfileUpdateRequestBody(**data) - - request = { - 'identity': 'pid', - 'profile': body - } - sot = profiles.ProfileUpdateRequest(**request) - self.assertEqual('pid', sot.identity) - self.assertIsInstance(sot.profile, profiles.ProfileUpdateRequestBody) - - def test_profile_data_to_primitive(self): - data = {'name': 'foo', 'metadata': {'aaa': 'bbb'}} - sot = profiles.ProfileUpdateRequestBody(**data) - res = sot.obj_to_primitive() - - self.assertIn('name', res['senlin_object.changes']) - self.assertIn('metadata', res['senlin_object.changes']) - self.assertEqual('foo', res['senlin_object.data']['name']) - self.assertEqual('{"aaa": "bbb"}', - res['senlin_object.data']['metadata']) - self.assertEqual('ProfileUpdateRequestBody', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - def test_request_to_primitive(self): - data = {'name': 'foo', 'metadata': {'aaa': 'bbb'}} - body = profiles.ProfileUpdateRequestBody(**data) - - request = { - 'identity': 'pid', - 'profile': body - } - sot = profiles.ProfileUpdateRequest(**request) - res = sot.obj_to_primitive() - - self.assertIn('identity', res['senlin_object.changes']) - self.assertEqual(u'pid', res['senlin_object.data']['identity']) - self.assertEqual('ProfileUpdateRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - -class TestProfileValidate(test_base.SenlinTestCase): - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'name': 'FAKE_SERVER_NAME', - 'flavor': 'FAKE_FLAVOR', - 'image': 'FAKE_IMAGE', - 'key_name': 'FAKE_KEYNAME', - 'networks': [{'network': 'FAKE_NET'}], - 'user_data': 'FAKE_USER_DATA' - } - } - - def test_validate_request_body(self): - spec = copy.deepcopy(self.spec) - body = profiles.ProfileValidateRequestBody(spec=spec) - - self.assertEqual('os.nova.server', body.spec['type']) - self.assertEqual('1.0', body.spec['version']) - - def test_validate_request(self): - spec = copy.deepcopy(self.spec) - body = profiles.ProfileValidateRequestBody(spec=spec) - - sot = profiles.ProfileValidateRequest(profile=body) - self.assertIsInstance(sot.profile, profiles.ProfileValidateRequestBody) - - def test_request_body_to_primitive(self): - spec = copy.deepcopy(self.spec) - body = profiles.ProfileValidateRequestBody(spec=spec) - 
res = body.obj_to_primitive() - - self.assertIn('spec', res['senlin_object.changes']) - self.assertEqual( - 'ProfileValidateRequestBody', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - data = jsonutils.loads(res['senlin_object.data']['spec']) - self.assertEqual(u'os.nova.server', data['type']) - self.assertEqual(u'1.0', data['version']) - - def test_request_to_primitive(self): - spec = copy.deepcopy(self.spec) - body = profiles.ProfileValidateRequestBody(spec=spec) - - sot = profiles.ProfileValidateRequest(profile=body) - res = sot.obj_to_primitive() - - self.assertIn('profile', res['senlin_object.changes']) - self.assertEqual('ProfileValidateRequest', res['senlin_object.name']) - self.assertEqual('senlin', res['senlin_object.namespace']) - self.assertEqual('1.0', res['senlin_object.version']) - - profile_body = res['senlin_object.data']['profile'] - self.assertIn('spec', profile_body['senlin_object.changes']) - - self.assertEqual( - 'ProfileValidateRequestBody', profile_body['senlin_object.name']) - self.assertEqual('senlin', profile_body['senlin_object.namespace']) - self.assertEqual('1.0', profile_body['senlin_object.version']) - - data = jsonutils.loads(profile_body['senlin_object.data']['spec']) - self.assertEqual(u'os.nova.server', data['type']) - self.assertEqual(u'1.0', data['version']) - - -class TestProfileDelete(test_base.SenlinTestCase): - - def test_profile_delete(self): - sot = profiles.ProfileDeleteRequest(identity='FAKE_ID') - - self.assertEqual('FAKE_ID', sot.identity) diff --git a/senlin/tests/unit/objects/requests/test_receivers.py b/senlin/tests/unit/objects/requests/test_receivers.py deleted file mode 100644 index 36771c179..000000000 --- a/senlin/tests/unit/objects/requests/test_receivers.py +++ /dev/null @@ -1,164 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -from oslo_config import cfg - -from senlin.common import consts -from senlin.objects.requests import receivers -from senlin.tests.unit.common import base as test_base - -CONF = cfg.CONF -CONF.import_opt('default_action_timeout', 'senlin.conf') - - -class TestReceiverCreate(test_base.SenlinTestCase): - - body = { - 'name': 'test-receiver', - 'type': 'message', - } - - def test_receiver_create_request_body(self): - sot = receivers.ReceiverCreateRequestBody(**self.body) - self.assertEqual('test-receiver', sot.name) - self.assertEqual('message', sot.type) - - sot.obj_set_defaults() - - self.assertEqual({}, sot.actor) - self.assertEqual({}, sot.params) - self.assertFalse(sot.obj_attr_is_set('action')) - self.assertFalse(sot.obj_attr_is_set('cluster_id')) - - def test_receiver_create_request_body_full(self): - body = copy.deepcopy(self.body) - body['type'] = 'webhook' - body['cluster_id'] = 'cluster-01' - body['action'] = consts.CLUSTER_SCALE_OUT - body['actor'] = {'user': 'user1', 'password': 'pass1'} - body['params'] = {'count': '1'} - sot = receivers.ReceiverCreateRequestBody(**body) - self.assertEqual('test-receiver', sot.name) - self.assertEqual('webhook', sot.type) - self.assertEqual('cluster-01', sot.cluster_id) - self.assertEqual(consts.CLUSTER_SCALE_OUT, sot.action) - self.assertEqual({'user': 'user1', 'password': 'pass1'}, sot.actor) - self.assertEqual({'count': '1'}, sot.params) - - def test_receiver_create_request_body_invalid_type(self): - body = copy.deepcopy(self.body) - body['type'] = 'Bogus' - ex = self.assertRaises(ValueError, receivers.ReceiverCreateRequestBody, - **body) - self.assertEqual("Value 'Bogus' is not acceptable for field 'type'.", - str(ex)) - - def test_receiver_create_request_body_invalid_action(self): - body = copy.deepcopy(self.body) - body['type'] = 'webhook' - body['cluster_id'] = 'cluster-01' - body['action'] = 'Foo' - - ex = self.assertRaises(ValueError, receivers.ReceiverCreateRequestBody, - **body) - self.assertEqual("Value 'Foo' is not acceptable for field 'action'.", - str(ex)) - - def test_receiver_create_request(self): - body = receivers.ReceiverCreateRequestBody(**self.body) - request = {'receiver': body} - sot = receivers.ReceiverCreateRequest(**request) - self.assertIsInstance(sot.receiver, - receivers.ReceiverCreateRequestBody) - self.assertEqual('test-receiver', sot.receiver.name) - self.assertEqual('message', sot.receiver.type) - - -class TestReceiverList(test_base.SenlinTestCase): - - def test_receiver_list_request_full(self): - params = { - 'name': ['receiver01'], - 'type': ['webhook'], - 'action': ['CLUSTER_RESIZE', 'CLUSTER_SCALE_IN'], - 'cluster_id': ['8c3c9af7-d768-4c5a-a21e-5261b22d749d'], - 'user': ['8cbac8cf571b41bd8e27fb1a4bcaa7d7'], - 'limit': 3, - 'marker': 'f1ed0d50-7651-4599-a8cb-c86e9c7123f5', - 'sort': 'name:asc', - 'project_safe': False, - } - sot = receivers.ReceiverListRequest(**params) - self.assertEqual(['receiver01'], sot.name) - self.assertEqual(['webhook'], sot.type) - self.assertEqual(['CLUSTER_RESIZE', 'CLUSTER_SCALE_IN'], sot.action) - self.assertEqual(['8c3c9af7-d768-4c5a-a21e-5261b22d749d'], - sot.cluster_id) - self.assertEqual(['8cbac8cf571b41bd8e27fb1a4bcaa7d7'], sot.user) - self.assertEqual(3, sot.limit) - self.assertEqual('f1ed0d50-7651-4599-a8cb-c86e9c7123f5', sot.marker) - self.assertEqual('name:asc', sot.sort) - self.assertFalse(sot.project_safe) - - def test_receiver_list_request_default(self): - sot = receivers.ReceiverListRequest() - sot.obj_set_defaults() - 
self.assertTrue(sot.project_safe)
-
-
-class TestReceiverGet(test_base.SenlinTestCase):
-
-    def test_receiver_get_request_full(self):
-        params = {
-            'identity': 'receiver-001'
-        }
-        sot = receivers.ReceiverGetRequest(**params)
-        self.assertEqual('receiver-001', sot.identity)
-
-
-class TestReceiverUpdate(test_base.SenlinTestCase):
-    data = {
-        'name': 'receiver01',
-        'type': 'webhook',
-        'action': 'CLUSTER_SCALE_OUT',
-        'params': {'count': '2'},
-    }
-
-    def test_receiver_update_request(self):
-        sot = receivers.ReceiverUpdateRequest(**self.data)
-        self.assertEqual('receiver01', sot.name)
-        self.assertEqual('webhook', sot.type)
-        self.assertEqual('CLUSTER_SCALE_OUT', sot.action)
-        self.assertEqual({'count': '2'}, sot.params)
-
-
-class TestReceiverDelete(test_base.SenlinTestCase):
-
-    body = {
-        'identity': 'test-receiver'
-    }
-
-    def test_receiver_delete_request(self):
-        sot = receivers.ReceiverDeleteRequest(**self.body)
-        self.assertEqual('test-receiver', sot.identity)
-
-
-class TestReceiverNotify(test_base.SenlinTestCase):
-
-    def test_receiver_notify_request(self):
-        params = {
-            'identity': 'receiver-001'
-        }
-        sot = receivers.ReceiverNotifyRequest(**params)
-        self.assertEqual('receiver-001', sot.identity)
diff --git a/senlin/tests/unit/objects/requests/test_webhooks.py b/senlin/tests/unit/objects/requests/test_webhooks.py
deleted file mode 100644
index a2b1fbc93..000000000
--- a/senlin/tests/unit/objects/requests/test_webhooks.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from senlin.objects.requests import webhooks
-from senlin.tests.unit.common import base as test_base
-
-
-class TestWebhookTrigger(test_base.SenlinTestCase):
-
-    def test_webhook_trigger_body_none(self):
-        sot = webhooks.WebhookTriggerRequestBody(params=None)
-        self.assertIsNone(sot.params)
-
-    def test_webhook_trigger_body(self):
-        sot = webhooks.WebhookTriggerRequestBody(params={'foo': 'boo'})
-        self.assertEqual({'foo': 'boo'}, sot.params)
-
-    def test_webhook_trigger_body_to_primitive(self):
-        sot = webhooks.WebhookTriggerRequestBody(params={'foo': 'boo'})
-        res = sot.obj_to_primitive()
-        self.assertIn('params', res['senlin_object.changes'])
-        self.assertEqual({'params': '{"foo": "boo"}'},
-                         res['senlin_object.data'])
-        self.assertEqual(
-            'WebhookTriggerRequestBody', res['senlin_object.name'])
-        self.assertEqual('1.0', res['senlin_object.version'])
-        self.assertEqual('senlin', res['senlin_object.namespace'])
-
-    def test_webhook_trigger_none_param(self):
-        body = webhooks.WebhookTriggerRequestBody(params=None)
-        sot = webhooks.WebhookTriggerRequest(identity='fake', params=body)
-        self.assertEqual('fake', sot.identity)
-        self.assertIsInstance(sot.params, webhooks.WebhookTriggerRequestBody)
-
-    def test_webhook_trigger(self):
-        body = webhooks.WebhookTriggerRequestBody(params={'foo': 'boo'})
-        sot = webhooks.WebhookTriggerRequest(identity='fake', params=body)
-        self.assertEqual('fake', sot.identity)
-        self.assertIsInstance(sot.params, webhooks.WebhookTriggerRequestBody)
-
-    def test_webhook_trigger_to_primitive(self):
-        body = webhooks.WebhookTriggerRequestBody(params={'foo': 'boo'})
-        sot = webhooks.WebhookTriggerRequest(identity='fake', params=body)
-        self.assertEqual('fake', sot.identity)
-        self.assertIsInstance(sot.params, webhooks.WebhookTriggerRequestBody)
-
-        res = sot.obj_to_primitive()
-
-        self.assertIn('identity', res['senlin_object.changes'])
-        self.assertEqual('WebhookTriggerRequest', res['senlin_object.name'])
-        self.assertEqual('1.0', res['senlin_object.version'])
-        self.assertEqual('senlin', res['senlin_object.namespace'])
-        self.assertEqual(u'fake', res['senlin_object.data']['identity'])
-
-    def test_webhook_trigger_params_in_body_none_param(self):
-        body = None
-        sot = webhooks.WebhookTriggerRequestParamsInBody(
-            identity='fake', params=body)
-        self.assertEqual('fake', sot.identity)
-        self.assertIsNone(sot.params)
-
-    def test_webhook_trigger_params_in_body(self):
-        body = {'foo': 'boo'}
-        sot = webhooks.WebhookTriggerRequestParamsInBody(
-            identity='fake', params=body)
-        self.assertEqual('fake', sot.identity)
-        self.assertIsInstance(sot.params, dict)
-
-    def test_webhook_trigger_params_in_body_to_primitive(self):
-        body = {'foo': 'boo'}
-        sot = webhooks.WebhookTriggerRequestParamsInBody(
-            identity='fake', params=body)
-        self.assertEqual('fake', sot.identity)
-        self.assertIsInstance(sot.params, dict)
-
-        res = sot.obj_to_primitive()
-
-        self.assertIn('identity', res['senlin_object.changes'])
-        self.assertEqual('WebhookTriggerRequestParamsInBody',
-                         res['senlin_object.name'])
-        self.assertEqual('1.0', res['senlin_object.version'])
-        self.assertEqual('senlin', res['senlin_object.namespace'])
-        self.assertEqual(u'fake', res['senlin_object.data']['identity'])
diff --git a/senlin/tests/unit/objects/test_action.py b/senlin/tests/unit/objects/test_action.py
deleted file mode 100644
index bbc4a3c51..000000000
--- a/senlin/tests/unit/objects/test_action.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except
in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_utils import uuidutils -import testtools - -from senlin.common import exception as exc -from senlin.objects import action as ao - - -class TestAction(testtools.TestCase): - - def setUp(self): - super(TestAction, self).setUp() - self.ctx = mock.Mock() - - @mock.patch.object(ao.Action, 'get') - def test_find_by_uuid(self, mock_get): - x_action = mock.Mock() - mock_get.return_value = x_action - aid = uuidutils.generate_uuid() - - result = ao.Action.find(self.ctx, aid) - - self.assertEqual(x_action, result) - mock_get.assert_called_once_with(self.ctx, aid) - - @mock.patch.object(ao.Action, 'get_by_name') - @mock.patch.object(ao.Action, 'get') - def test_find_by_uuid_as_name(self, mock_get, mock_name): - mock_get.return_value = None - x_action = mock.Mock() - mock_name.return_value = x_action - aid = uuidutils.generate_uuid() - - result = ao.Action.find(self.ctx, aid, project_safe=False) - - self.assertEqual(x_action, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=False) - mock_name.assert_called_once_with(self.ctx, aid, project_safe=False) - - @mock.patch.object(ao.Action, 'get_by_name') - def test_find_by_name(self, mock_name): - x_action = mock.Mock() - mock_name.return_value = x_action - aid = 'not-a-uuid' - - result = ao.Action.find(self.ctx, aid, project_safe=True) - - self.assertEqual(x_action, result) - mock_name.assert_called_once_with(self.ctx, aid, project_safe=True) - - @mock.patch.object(ao.Action, 'get_by_short_id') - @mock.patch.object(ao.Action, 'get_by_name') - def test_find_by_short_id(self, mock_name, mock_shortid): - mock_name.return_value = None - x_action = mock.Mock() - mock_shortid.return_value = x_action - aid = 'abcdef' - - result = ao.Action.find(self.ctx, aid) - - self.assertEqual(x_action, result) - mock_name.assert_called_once_with(self.ctx, aid) - mock_shortid.assert_called_once_with(self.ctx, aid) - - @mock.patch.object(ao.Action, 'get_by_name') - @mock.patch.object(ao.Action, 'get_by_short_id') - def test_find_not_found(self, mock_shortid, mock_name): - mock_name.return_value = None - mock_shortid.return_value = None - - ex = self.assertRaises(exc.ResourceNotFound, - ao.Action.find, - self.ctx, 'BOGUS') - self.assertEqual("The action 'BOGUS' could not be found.", - str(ex)) - mock_name.assert_called_once_with(self.ctx, 'BOGUS') - mock_shortid.assert_called_once_with(self.ctx, 'BOGUS') diff --git a/senlin/tests/unit/objects/test_base.py b/senlin/tests/unit/objects/test_base.py deleted file mode 100644 index 0c79e7ea3..000000000 --- a/senlin/tests/unit/objects/test_base.py +++ /dev/null @@ -1,160 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_versionedobjects import base as ovo_base -from oslo_versionedobjects import exception as exc - -from senlin.objects import base as obj_base -from senlin.objects import fields as obj_fields -from senlin.tests.unit.common import base - - -class FakeObject(obj_base.SenlinObject): - - VERSION_MAP = { - '1.3': '1.2' - } - - -class TestBaseObject(base.SenlinTestCase): - - def test_base_class(self): - obj = obj_base.SenlinObject() - self.assertEqual(obj_base.SenlinObject.OBJ_PROJECT_NAMESPACE, - obj.OBJ_PROJECT_NAMESPACE) - self.assertEqual(obj_base.SenlinObject.VERSION, - obj.VERSION) - - @mock.patch.object(obj_base.SenlinObject, "obj_reset_changes") - def test_from_db_object(self, mock_obj_reset_ch): - class TestSenlinObject(obj_base.SenlinObject, - obj_base.VersionedObjectDictCompat): - fields = { - "key1": obj_fields.StringField(), - "key2": obj_fields.StringField(), - "metadata": obj_fields.JsonField() - } - - obj = TestSenlinObject() - context = mock.Mock() - db_obj = { - "key1": "value1", - "key2": "value2", - "meta_data": {"key3": "value3"} - } - res = obj_base.SenlinObject._from_db_object(context, obj, db_obj) - self.assertIsNotNone(res) - self.assertEqual("value1", obj["key1"]) - self.assertEqual("value2", obj["key2"]) - self.assertEqual({"key3": "value3"}, obj["metadata"]) - self.assertEqual(obj._context, context) - mock_obj_reset_ch.assert_called_once_with() - - def test_from_db_object_none(self): - obj = obj_base.SenlinObject() - db_obj = None - context = mock.Mock() - - res = obj_base.SenlinObject._from_db_object(context, obj, db_obj) - self.assertIsNone(res) - - def test_to_json_schema(self): - obj = obj_base.SenlinObject() - self.assertRaises(exc.UnsupportedObjectError, obj.to_json_schema) - - @mock.patch.object(ovo_base.VersionedObject, 'obj_class_from_name') - def test_obj_class_from_name_with_version(self, mock_convert): - res = obj_base.SenlinObject.obj_class_from_name('Foo', '1.23') - - self.assertEqual(mock_convert.return_value, res) - mock_convert.assert_called_once_with('Foo', '1.23') - - @mock.patch.object(ovo_base.VersionedObject, 'obj_class_from_name') - def test_obj_class_from_name_no_version(self, mock_convert): - res = obj_base.SenlinObject.obj_class_from_name('Foo') - - self.assertEqual(mock_convert.return_value, res) - mock_convert.assert_called_once_with( - 'Foo', obj_base.SenlinObject.VERSION) - - def test_find_version_default(self): - ctx = mock.Mock(api_version='1.1') - - res = FakeObject.find_version(ctx) - - self.assertEqual('1.0', res) - - def test_find_version_match(self): - ctx = mock.Mock(api_version='1.3') - - res = FakeObject.find_version(ctx) - - self.assertEqual('1.2', res) - - def test_find_version_above(self): - ctx = mock.Mock(api_version='1.4') - - res = FakeObject.find_version(ctx) - - self.assertEqual('1.2', res) - - def test_normalize_req(self): - req = {'primary': {'bar': 'zoo'}} - name = 'reqname' - key = 'primary' - expected = { - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': obj_base.SenlinObject.VERSION, - 'senlin_object.name': name, - 'senlin_object.data': { - 'primary': { - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': obj_base.SenlinObject.VERSION, - 'senlin_object.name': 'reqnameBody', - 'senlin_object.data': { - 'bar': 'zoo' - } - } - } - } - - res = obj_base.SenlinObject.normalize_req(name, req, key) - - self.assertEqual(expected, res) - - def 
test_normalize_req_no_key(self): - req = {'bar': 'zoo'} - name = 'reqname' - expected = { - 'senlin_object.namespace': 'senlin', - 'senlin_object.version': obj_base.SenlinObject.VERSION, - 'senlin_object.name': name, - 'senlin_object.data': { - 'bar': 'zoo' - } - } - - res = obj_base.SenlinObject.normalize_req(name, req) - - self.assertEqual(expected, res) - - def test_normalize_req_missing_key(self): - req = {'bar': 'zoo'} - name = 'reqname' - - ex = self.assertRaises(ValueError, - obj_base.SenlinObject.normalize_req, - name, req, 'foo') - - self.assertEqual("Request body missing 'foo' key.", str(ex)) diff --git a/senlin/tests/unit/objects/test_cluster.py b/senlin/tests/unit/objects/test_cluster.py deleted file mode 100644 index 7ebb53e8a..000000000 --- a/senlin/tests/unit/objects/test_cluster.py +++ /dev/null @@ -1,167 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from senlin.common import exception as exc -from senlin.objects import cluster as co -from senlin.objects import cluster_policy as cpo -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestCluster(base.SenlinTestCase): - - def setUp(self): - super(TestCluster, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(co.Cluster, 'get') - def test_find_by_uuid(self, mock_get): - x_cluster = mock.Mock() - mock_get.return_value = x_cluster - aid = uuidutils.generate_uuid() - - result = co.Cluster.find(self.ctx, aid) - - self.assertEqual(x_cluster, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=True) - - @mock.patch.object(co.Cluster, 'get_by_name') - @mock.patch.object(co.Cluster, 'get') - def test_find_by_uuid_as_name(self, mock_get, mock_get_name): - x_cluster = mock.Mock() - mock_get_name.return_value = x_cluster - mock_get.return_value = None - - aid = uuidutils.generate_uuid() - result = co.Cluster.find(self.ctx, aid, False) - - self.assertEqual(x_cluster, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=False) - mock_get_name.assert_called_once_with(self.ctx, aid, - project_safe=False) - - @mock.patch.object(co.Cluster, 'get_by_name') - def test_find_by_name(self, mock_get_name): - x_cluster = mock.Mock() - mock_get_name.return_value = x_cluster - aid = 'this-is-not-uuid' - - result = co.Cluster.find(self.ctx, aid) - - self.assertEqual(x_cluster, result) - mock_get_name.assert_called_once_with(self.ctx, aid, project_safe=True) - - @mock.patch.object(co.Cluster, 'get_by_short_id') - @mock.patch.object(co.Cluster, 'get_by_name') - def test_find_by_shortid(self, mock_get_name, mock_get_shortid): - x_cluster = mock.Mock() - mock_get_shortid.return_value = x_cluster - mock_get_name.return_value = None - aid = 'abcd-1234-abcd' - - result = co.Cluster.find(self.ctx, aid, False) - - self.assertEqual(x_cluster, result) - mock_get_name.assert_called_once_with(self.ctx, aid, - project_safe=False) - 
mock_get_shortid.assert_called_once_with(self.ctx, aid, - project_safe=False) - - @mock.patch.object(co.Cluster, 'get_by_short_id') - @mock.patch.object(co.Cluster, 'get_by_name') - def test_find_not_found(self, mock_get_name, mock_get_short_id): - mock_get_name.return_value = None - mock_get_short_id.return_value = None - - self.assertRaises(exc.ResourceNotFound, - co.Cluster.find, - self.ctx, 'bogus') - - mock_get_name.assert_called_once_with(self.ctx, 'bogus', - project_safe=True) - mock_get_short_id.assert_called_once_with(self.ctx, 'bogus', - project_safe=True) - - def test_to_dict(self): - PROFILE_ID = '96f4df4b-889e-4184-ba8d-b5ca122f95bb' - POLICY1_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de' - POLICY2_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536d3' - NODE1_ID = '26f4df4b-889e-4184-ba8d-b5ca122f9566' - NODE2_ID = '26f4df4b-889e-4184-ba8d-b5ca122f9567' - - utils.create_profile(self.ctx, PROFILE_ID) - policy_1 = utils.create_policy(self.ctx, POLICY1_ID, 'P1') - policy_2 = utils.create_policy(self.ctx, POLICY2_ID, 'P2') - - values = { - 'profile_id': PROFILE_ID, - 'name': 'test-cluster', - 'desired_capacity': 1, - 'status': 'INIT', - 'init_at': timeutils.utcnow(True), - 'max_size': -1, - 'min_size': 0, - 'timeout': cfg.CONF.default_action_timeout, - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - } - cluster = co.Cluster.create(self.ctx, values) - p1 = cpo.ClusterPolicy(cluster_id=cluster.id, policy_id=policy_1.id, - enabled=True, id=uuidutils.generate_uuid(), - last_op=None) - p2 = cpo.ClusterPolicy(cluster_id=cluster.id, policy_id=policy_2.id, - enabled=True, id=uuidutils.generate_uuid(), - last_op=None) - values = { - 'priority': 12, - 'enabled': True, - } - p1.create(self.ctx, cluster.id, POLICY1_ID, values) - p2.create(self.ctx, cluster.id, POLICY2_ID, values) - utils.create_node(self.ctx, NODE1_ID, PROFILE_ID, cluster.id) - utils.create_node(self.ctx, NODE2_ID, PROFILE_ID, cluster.id) - cluster = co.Cluster.get(self.ctx, cluster.id) - expected = { - 'id': cluster.id, - 'name': cluster.name, - 'profile_id': PROFILE_ID, - 'user': cluster.user, - 'project': cluster.project, - 'domain': cluster.domain, - 'init_at': mock.ANY, - 'created_at': None, - 'updated_at': None, - 'min_size': 0, - 'max_size': -1, - 'desired_capacity': 1, - 'timeout': cfg.CONF.default_action_timeout, - 'status': str('INIT'), - 'status_reason': None, - 'metadata': {}, - 'data': {}, - 'dependents': {}, - 'config': {}, - 'nodes': [mock.ANY, mock.ANY], - 'policies': [mock.ANY, mock.ANY], - 'profile_name': str('test-profile'), - } - cluster_dict = cluster.to_dict() - - self.assertEqual(expected, cluster_dict) - self.assertEqual(2, len(cluster_dict['nodes'])) - self.assertEqual(2, len(cluster_dict['policies'])) diff --git a/senlin/tests/unit/objects/test_event.py b/senlin/tests/unit/objects/test_event.py deleted file mode 100644 index 49f4f7858..000000000 --- a/senlin/tests/unit/objects/test_event.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from oslo_utils import uuidutils -import testtools - -from senlin.common import exception as exc -from senlin.objects import event as eo - - -class TestEvent(testtools.TestCase): - - def setUp(self): - super(TestEvent, self).setUp() - self.ctx = mock.Mock() - - @mock.patch.object(eo.Event, 'get') - def test_find_by_uuid(self, mock_get): - x_event = mock.Mock() - mock_get.return_value = x_event - aid = uuidutils.generate_uuid() - - result = eo.Event.find(self.ctx, aid) - - self.assertEqual(x_event, result) - mock_get.assert_called_once_with(self.ctx, aid) - - @mock.patch.object(eo.Event, 'get_by_short_id') - @mock.patch.object(eo.Event, 'get') - def test_find_by_short_id(self, mock_get, mock_shortid): - mock_get.return_value = None - x_event = mock.Mock() - mock_shortid.return_value = x_event - aid = uuidutils.generate_uuid() - - result = eo.Event.find(self.ctx, aid, project_safe=False) - - self.assertEqual(x_event, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=False) - mock_shortid.assert_called_once_with(self.ctx, aid, project_safe=False) - - @mock.patch.object(eo.Event, 'get_by_short_id') - def test_find_by_short_id_directly(self, mock_shortid): - x_event = mock.Mock() - mock_shortid.return_value = x_event - aid = 'abcdef' - - result = eo.Event.find(self.ctx, aid, project_safe=True) - - self.assertEqual(x_event, result) - mock_shortid.assert_called_once_with(self.ctx, aid, project_safe=True) - - @mock.patch.object(eo.Event, 'get_by_short_id') - def test_find_not_found(self, mock_shortid): - mock_shortid.return_value = None - - ex = self.assertRaises(exc.ResourceNotFound, - eo.Event.find, - self.ctx, 'BOGUS') - self.assertEqual("The event 'BOGUS' could not be found.", - str(ex)) - mock_shortid.assert_called_once_with(self.ctx, 'BOGUS') diff --git a/senlin/tests/unit/objects/test_fields.py b/senlin/tests/unit/objects/test_fields.py deleted file mode 100644 index 1c6f05a03..000000000 --- a/senlin/tests/unit/objects/test_fields.py +++ /dev/null @@ -1,767 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg -from oslo_versionedobjects import fields -import testtools - -from senlin.common import consts -from senlin.objects import fields as senlin_fields - -CONF = cfg.CONF - - -class FakeFieldType(fields.FieldType): - def coerce(self, obj, attr, value): - return '*%s*' % value - - def to_primitive(self, obj, attr, value): - return '!%s!' 
% value - - def from_primitive(self, obj, attr, value): - return value[1:-1] - - -class TestField(testtools.TestCase): - - def setUp(self): - super(TestField, self).setUp() - self.field = fields.Field(FakeFieldType()) - self.coerce_good_values = [('foo', '*foo*')] - self.coerce_bad_values = [] - self.to_primitive_values = [('foo', '!foo!')] - self.from_primitive_values = [('!foo!', 'foo')] - - def test_coerce_good_values(self): - for in_val, out_val in self.coerce_good_values: - self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val)) - - def test_coerce_bad_values(self): - for in_val in self.coerce_bad_values: - self.assertRaises((TypeError, ValueError), - self.field.coerce, 'obj', 'attr', in_val) - - def test_to_primitive(self): - for in_val, prim_val in self.to_primitive_values: - self.assertEqual(prim_val, - self.field.to_primitive('obj', 'attr', in_val)) - - def test_from_primitive(self): - class ObjectLikeThing(object): - _context = 'context' - - for prim_val, out_val in self.from_primitive_values: - self.assertEqual(out_val, - self.field.from_primitive(ObjectLikeThing, 'attr', - prim_val)) - - def test_stringify(self): - self.assertEqual('123', self.field.stringify(123)) - - -class TestBoolean(TestField): - - def setUp(self): - super(TestBoolean, self).setUp() - - self.field = senlin_fields.BooleanField() - self.coerce_good_values = [ - ('True', True), - ('T', True), - ('t', True), - ('1', True), - ('yes', True), - ('on', True), - ('False', False), - ('F', False), - ('f', False), - ('0', False), - ('no', False), - ('off', False) - ] - self.coerce_bad_values = ['BOGUS'] - - self.to_primitive_values = [ - (True, True), - (False, False) - ] - - self.from_primitive_values = [ - ('True', 'True'), - ('False', 'False') - ] - - def test_stringify(self): - self.assertEqual('True', self.field.stringify(True)) - self.assertEqual('False', self.field.stringify(False)) - - -class TestJson(TestField): - def setUp(self): - super(TestJson, self).setUp() - - self.field = senlin_fields.JsonField() - self.coerce_good_values = [('{"k": "v"}', {"k": "v"})] - self.coerce_bad_values = ['{"K": "v"]'] - self.to_primitive_values = [({"k": "v"}, '{"k": "v"}')] - self.from_primitive_values = [('{"k": "v"}', {"k": "v"})] - - def test_stringify(self): - self.assertEqual("{'k': 'v'}", self.field.stringify({"k": "v"})) - - def test_stringify_invalid(self): - self.assertRaises(ValueError, - self.field.stringify, self.coerce_bad_values[0]) - - def test_get_schema(self): - self.assertEqual( - {'type': ['object'], 'readonly': False}, - self.field.get_schema() - ) - - -class TestUniqueDict(TestField): - - def setUp(self): - super(TestUniqueDict, self).setUp() - - self.field = senlin_fields.UniqueDict(fields.String()) - self.coerce_good_values = [({"k": "v"}, {"k": "v"})] - self.coerce_bad_values = ['{"K": "v"]'] - self.to_primitive_values = [({"k": "v"}, {"k": "v"})] - self.from_primitive_values = [({"k": "v"}, {"k": "v"})] - - def test_stringify(self): - self.assertEqual("{k='v'}", self.field.stringify({"k": "v"})) - - def test_coerce(self): - res = self.field.coerce(None, 'attr', {'k1': 'v1'}) - self.assertEqual({'k1': 'v1'}, res) - - def test_coerce_failed_duplicate(self): - ex = self.assertRaises(ValueError, - self.field.coerce, - None, 'attr', {'k1': 'v1', 'k2': 'v1'}) - - self.assertEqual('Map contains duplicated values', - str(ex)) - - -class TestNotificationPriority(TestField): - def setUp(self): - super(TestNotificationPriority, self).setUp() - - self.field = senlin_fields.NotificationPriorityField() 
-        self.coerce_good_values = [('audit', 'audit'),
-                                   ('critical', 'critical'),
-                                   ('debug', 'debug'),
-                                   ('error', 'error'),
-                                   ('sample', 'sample'),
-                                   ('warn', 'warn')]
-        self.coerce_bad_values = ['warning']
-        self.to_primitive_values = self.coerce_good_values[0:1]
-        self.from_primitive_values = self.coerce_good_values[0:1]
-
-    def test_stringify(self):
-        self.assertEqual("'warn'", self.field.stringify('warn'))
-
-    def test_stringify_invalid(self):
-        self.assertRaises(ValueError, self.field.stringify, 'warning')
-
-
-class TestNotificationPhase(TestField):
-    def setUp(self):
-        super(TestNotificationPhase, self).setUp()
-
-        self.field = senlin_fields.NotificationPhaseField()
-        self.coerce_good_values = [('start', 'start'),
-                                   ('end', 'end'),
-                                   ('error', 'error')]
-        self.coerce_bad_values = ['begin']
-
-        self.to_primitive_values = self.coerce_good_values[0:1]
-        self.from_primitive_values = self.coerce_good_values[0:1]
-
-    def test_stringify(self):
-        self.assertEqual("'error'", self.field.stringify('error'))
-
-    def test_stringify_invalid(self):
-        self.assertRaises(ValueError, self.field.stringify, 'begin')
-
-
-class TestName(TestField):
-
-    def setUp(self):
-        super(TestName, self).setUp()
-
-        self.field = senlin_fields.NameField()
-        self.coerce_good_values = [
-            ('name1', 'name1'),          # plain string
-            ('name2.sec', 'name2.sec'),  # '.' okay
-            ('123-sec', '123-sec'),      # '-' okay
-            ('123_sec', '123_sec'),      # '_' okay
-            ('123~sec', '123~sec'),      # '~' okay
-            ('557', '557'),              # pure numeric okay
-        ]
-        self.coerce_bad_values = [
-            '',              # too short
-            's' * 300,       # too long
-            'ab/',           # '/' illegal
-            's123$',         # '$' illegal
-            '13^gadf',       # '^' illegal
-            'sad&cheer',     # '&' illegal
-            'boo**',         # '*' illegal
-            'kwsqu()',       # '(' and ')' illegal
-            'bing+bang',     # '+' illegal
-            'var=value',     # '=' illegal
-            'quicksort[1]',  # '[' and ']' illegal
-            'sdi{"gh"}',     # '{' and '}' illegal
-            'gate open',     # ' ' illegal
-            '12.64%',        # '%' illegal
-            'name#sign',     # '#' illegal
-            'back\\slash',   # '\' illegal
-            ' leading',      # leading blank illegal
-            'trailing ',     # trailing blank illegal
-            '!okay',         # '!' illegal
-            '@author',       # '@' illegal
-            '`info`',        # '`' illegal
-            '"partial',      # '"' illegal
-            "'single",       # ''' illegal
-            'min<',          # '<' illegal
-            'max>',          # '>' illegal
-            'question?',     # '?' illegal
-            'first,second',  # ',' illegal
-        ]
-        self.to_primitive_values = self.coerce_good_values[0:1]
-        self.from_primitive_values = self.coerce_good_values[0:1]
-
-    def test_stringify(self):
-        self.assertEqual("'name1'", self.field.stringify('name1'))
-
-    def test_init(self):
-        sot = senlin_fields.Name(2, 200)
-
-        self.assertEqual(2, sot.min_len)
-        self.assertEqual(200, sot.max_len)
-
-    def test_coerce_failed(self):
-        obj = mock.Mock()
-        sot = senlin_fields.Name()
-
-        ex = self.assertRaises(ValueError,
-                               sot.coerce,
-                               obj, 'attr', 'value/bad')
-        self.assertEqual("The value for the 'attr' (value/bad) contains "
-                         "illegal characters. 
It must contain only " - "alphanumeric or \"_-.~\" characters and must start " - "with letter.", - str(ex)) - - def test_get_schema(self): - sot = senlin_fields.Name(2, 200) - self.assertEqual( - { - 'type': ['string'], - 'minLength': 2, - 'maxLength': 200 - }, - sot.get_schema() - ) - - def test_get_schema_default(self): - sot = senlin_fields.Name() - self.assertEqual( - { - 'type': ['string'], - 'minLength': 1, - 'maxLength': 255 - }, - sot.get_schema() - ) - - -class TestCapacity(TestField): - - def setUp(self): - super(TestCapacity, self).setUp() - - self.field = senlin_fields.CapacityField() - self.coerce_good_values = [ - (100, 100), # plain integer - ('100', 100), # string of integer - ('0123', 123), # leading zeros ignored - ] - self.coerce_bad_values = [ - -1, # less than 0 - 'strval', # illegal value - ] - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual('100', self.field.stringify(100)) - self.assertEqual('100', self.field.stringify('100')) - - def test_init(self): - CONF.set_override('max_nodes_per_cluster', 300) - sot = senlin_fields.Capacity() - - self.assertEqual(0, sot.minimum) - self.assertEqual(300, sot.maximum) - - def test_init_with_values(self): - CONF.set_override('max_nodes_per_cluster', 300) - sot = senlin_fields.Capacity(2, 200) - - self.assertEqual(2, sot.minimum) - self.assertEqual(200, sot.maximum) - - def test_init_invalid(self): - CONF.set_override('max_nodes_per_cluster', 100) - - ex = self.assertRaises(ValueError, - senlin_fields.Capacity, - minimum=101) - self.assertEqual("The value of 'minimum' cannot be greater than the " - "global constraint (100).", str(ex)) - - ex = self.assertRaises(ValueError, - senlin_fields.Capacity, - maximum=101) - self.assertEqual("The value of 'maximum' cannot be greater than the " - "global constraint (100).", str(ex)) - - ex = self.assertRaises(ValueError, - senlin_fields.Capacity, - minimum=60, maximum=40) - self.assertEqual("The value of 'maximum' must be greater than or equal" - " to that of the 'minimum' specified.", - str(ex)) - - def test_coerce(self): - sot = senlin_fields.Capacity(minimum=2, maximum=200) - obj = mock.Mock() - res = sot.coerce(obj, 'attr', 12) - self.assertEqual(12, res) - res = sot.coerce(obj, 'attr', 2) - self.assertEqual(2, res) - res = sot.coerce(obj, 'attr', 200) - self.assertEqual(200, res) - - sot = senlin_fields.Capacity() - - res = sot.coerce(obj, 'attr', 12) - self.assertEqual(12, res) - res = sot.coerce(obj, 'attr', 0) - self.assertEqual(0, res) - res = sot.coerce(obj, 'attr', CONF.max_nodes_per_cluster) - self.assertEqual(CONF.max_nodes_per_cluster, res) - - def test_coerce_failed(self): - sot = senlin_fields.Capacity(minimum=2, maximum=200) - obj = mock.Mock() - - ex = self.assertRaises(ValueError, - sot.coerce, - obj, 'attr', 1) - self.assertEqual("The value for the attr field must be greater than " - "or equal to 2.", str(ex)) - - ex = self.assertRaises(ValueError, - sot.coerce, - obj, 'attr', 201) - self.assertEqual("The value for the attr field must be less than " - "or equal to 200.", str(ex)) - - ex = self.assertRaises(ValueError, - sot.coerce, - obj, 'attr', 'badvalue') - self.assertEqual("The value for attr must be an integer: 'badvalue'.", - str(ex)) - - def test_get_schema(self): - sot = senlin_fields.Capacity(minimum=2, maximum=200) - self.assertEqual( - { - 'type': ['integer', 'string'], - 'minimum': 2, - 'maximum': 200, - 'pattern': '^[0-9]*$', - }, - 
sot.get_schema() - ) - - def test_get_schema_default(self): - cfg.CONF.set_override('max_nodes_per_cluster', 100) - sot = senlin_fields.Capacity() - self.assertEqual( - { - 'type': ['integer', 'string'], - 'minimum': 0, - 'maximum': 100, - 'pattern': '^[0-9]*$', - }, - sot.get_schema() - ) - - -class TestSort(TestField): - - def setUp(self): - super(TestSort, self).setUp() - - self.keys = ['key1', 'key2', 'key3'] - self.field = senlin_fields.Sort(valid_keys=self.keys) - self.coerce_good_values = [ - ('key1', 'key1'), # single key - ('key1,key2', 'key1,key2'), # multi keys - ('key1:asc', 'key1:asc'), # key with dir - ('key2:desc', 'key2:desc'), # key with different dir - ('key1,key2:asc', 'key1,key2:asc'), # mixed case - ] - self.coerce_bad_values = [ - 'foo', # unknown key - ':desc', # unspecified key - 'key1:up', # unsupported dir - 'key1,key2:up', # unsupported dir - 'foo,key2', # unknown key - 'key2,:asc', # unspecified key - 'key2,:desc', # unspecified key - 'key1,', # missing key - ',key2', # missing key - ] - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'key1,key2'", self.field.stringify('key1,key2')) - - def test_init(self): - keys = ['foo', 'bar'] - sot = senlin_fields.Sort(valid_keys=keys) - - self.assertEqual(keys, sot.valid_keys) - - def test_coerce_failure(self): - obj = mock.Mock() - ex = self.assertRaises(ValueError, - self.field.coerce, - obj, 'attr', ':asc') - self.assertEqual("Missing sort key for 'attr'.", str(ex)) - - ex = self.assertRaises(ValueError, - self.field.coerce, - obj, 'attr', 'foo:asc') - self.assertEqual("Unsupported sort key 'foo' for 'attr'.", - str(ex)) - - ex = self.assertRaises(ValueError, - self.field.coerce, - obj, 'attr', 'key1:down') - self.assertEqual("Unsupported sort dir 'down' for 'attr'.", - str(ex)) - - def test_get_schema(self): - self.assertEqual( - {'type': ['string']}, - self.field.get_schema() - ) - - -class TestIdentityList(TestField): - - def setUp(self): - super(TestIdentityList, self).setUp() - - self.field = senlin_fields.IdentityList(fields.String()) - - self.coerce_good_values = [ - (['abc'], ['abc']) - ] - self.coerce_bad_values = [ - 123 - ] - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("['abc','def']", - self.field.stringify(['abc', 'def'])) - - def test_init_with_params(self): - sot = senlin_fields.IdentityList(fields.String(), min_items=1, - unique=False) - - self.assertEqual(1, sot.min_items) - self.assertFalse(sot.unique_items) - - def test_coerce_not_unique_okay(self): - sot = senlin_fields.IdentityList(fields.String(), min_items=1, - unique=False) - obj = mock.Mock() - - # not unique is okay - res = sot.coerce(obj, 'attr', ['abc', 'abc']) - self.assertEqual(['abc', 'abc'], res) - - def test_coerce_too_short(self): - sot = senlin_fields.IdentityList(fields.String(), min_items=2, - unique=False) - obj = mock.Mock() - - # violating min_items - ex = self.assertRaises(ValueError, - sot.coerce, - obj, 'attr', []) - - self.assertEqual("Value for 'attr' must have at least 2 item(s).", - str(ex)) - - def test_coerce_not_unique_bad(self): - obj = mock.Mock() - - # violating unique_items - ex = self.assertRaises(ValueError, - self.field.coerce, - obj, 'attr', ['abc', 'abc']) - - self.assertEqual("Items for 'attr' must be unique", - str(ex)) - - def test_get_schema(self): - self.assertEqual( - { - 
'type': ['array'], - 'items': { - 'readonly': False, - 'type': ['string'], - }, - 'minItems': 0, - 'uniqueItems': True - }, - self.field.get_schema() - ) - - sot = senlin_fields.IdentityList(fields.String(), min_items=2, - unique=False, nullable=True) - self.assertEqual( - { - 'type': ['array', 'null'], - 'items': { - 'readonly': False, - 'type': ['string'], - }, - 'minItems': 2, - 'uniqueItems': False - }, - sot.get_schema() - ) - - -class TestAdjustmentTypeField(TestField): - - def setUp(self): - super(TestAdjustmentTypeField, self).setUp() - - self.field = senlin_fields.AdjustmentTypeField() - self.coerce_good_values = [ - ('EXACT_CAPACITY', 'EXACT_CAPACITY'), - ('CHANGE_IN_CAPACITY', 'CHANGE_IN_CAPACITY'), - ('CHANGE_IN_PERCENTAGE', 'CHANGE_IN_PERCENTAGE') - ] - self.coerce_bad_values = ['BOGUS'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'EXACT_CAPACITY'", - self.field.stringify('EXACT_CAPACITY')) - - def test_get_schema(self): - self.assertEqual( - { - 'type': ['string'], - 'readonly': False, - 'enum': ['EXACT_CAPACITY', 'CHANGE_IN_CAPACITY', - 'CHANGE_IN_PERCENTAGE'] - }, - self.field.get_schema() - ) - - -class TestAdjustmentType(TestField): - def setUp(self): - super(TestAdjustmentType, self).setUp() - - self.field = senlin_fields.AdjustmentType() - self.coerce_good_values = [ - ('EXACT_CAPACITY', 'EXACT_CAPACITY'), - ('CHANGE_IN_CAPACITY', 'CHANGE_IN_CAPACITY'), - ('CHANGE_IN_PERCENTAGE', 'CHANGE_IN_PERCENTAGE') - ] - self.coerce_bad_values = ['BOGUS'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'EXACT_CAPACITY'", - self.field.stringify('EXACT_CAPACITY')) - - def test_get_schema(self): - self.assertEqual( - { - 'type': ['string'], - 'enum': ['EXACT_CAPACITY', 'CHANGE_IN_CAPACITY', - 'CHANGE_IN_PERCENTAGE'] - }, - self.field.get_schema() - ) - - -class TestClusterActionNameField(TestField): - - def setUp(self): - super(TestClusterActionNameField, self).setUp() - self.field = senlin_fields.ClusterActionNameField() - self.coerce_good_values = [ - (action, action) for action in consts.CLUSTER_ACTION_NAMES] - self.coerce_bad_values = ['BOGUS'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'CLUSTER_RESIZE'", - self.field.stringify('CLUSTER_RESIZE')) - - def test_get_schema(self): - self.assertEqual( - { - 'type': ['string'], - 'readonly': False, - 'enum': ['CLUSTER_CREATE', 'CLUSTER_DELETE', - 'CLUSTER_UPDATE', 'CLUSTER_ADD_NODES', - 'CLUSTER_DEL_NODES', 'CLUSTER_RESIZE', - 'CLUSTER_CHECK', 'CLUSTER_RECOVER', - 'CLUSTER_REPLACE_NODES', 'CLUSTER_SCALE_OUT', - 'CLUSTER_SCALE_IN', 'CLUSTER_ATTACH_POLICY', - 'CLUSTER_DETACH_POLICY', 'CLUSTER_UPDATE_POLICY', - 'CLUSTER_OPERATION'] - }, - self.field.get_schema() - ) - - -class TestClusterActionName(TestField): - - def setUp(self): - super(TestClusterActionName, self).setUp() - self.field = senlin_fields.ClusterActionName() - self.coerce_good_values = [ - (action, action) for action in consts.CLUSTER_ACTION_NAMES] - self.coerce_bad_values = ['BOGUS'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'CLUSTER_RESIZE'", - 
self.field.stringify('CLUSTER_RESIZE')) - - def test_get_schema(self): - self.assertEqual( - { - 'type': ['string'], - 'enum': ['CLUSTER_CREATE', 'CLUSTER_DELETE', - 'CLUSTER_UPDATE', 'CLUSTER_ADD_NODES', - 'CLUSTER_DEL_NODES', 'CLUSTER_RESIZE', - 'CLUSTER_CHECK', 'CLUSTER_RECOVER', - 'CLUSTER_REPLACE_NODES', 'CLUSTER_SCALE_OUT', - 'CLUSTER_SCALE_IN', 'CLUSTER_ATTACH_POLICY', - 'CLUSTER_DETACH_POLICY', 'CLUSTER_UPDATE_POLICY', - 'CLUSTER_OPERATION'] - }, - self.field.get_schema() - ) - - -class TestReceiverTypeField(TestField): - - def setUp(self): - super(TestReceiverTypeField, self).setUp() - self.field = senlin_fields.ReceiverTypeField() - self.coerce_good_values = [ - (action, action) for action in consts.RECEIVER_TYPES] - self.coerce_bad_values = ['BOGUS'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'message'", - self.field.stringify('message')) - - def test_get_schema(self): - self.assertEqual( - { - 'type': ['string'], - 'readonly': False, - 'enum': ['webhook', 'message'] - }, - self.field.get_schema() - ) - - -class TestReceiverType(TestField): - - def setUp(self): - super(TestReceiverType, self).setUp() - self.field = senlin_fields.ReceiverType() - self.coerce_good_values = [ - (action, action) for action in consts.RECEIVER_TYPES] - self.coerce_bad_values = ['BOGUS'] - - self.to_primitive_values = self.coerce_good_values[0:1] - self.from_primitive_values = self.coerce_good_values[0:1] - - def test_stringify(self): - self.assertEqual("'message'", - self.field.stringify('message')) - - def test_get_schema(self): - self.assertEqual( - { - 'type': ['string'], - 'enum': ['webhook', 'message'] - }, - self.field.get_schema() - ) - - -class TestCustomField(TestField): - def setUp(self): - super(TestCustomField, self).setUp() - self.field = senlin_fields.CustomListField(attr_name='dependant') - dep = mock.Mock() - dep.dependant = '123' - self.coerce_good_values = [([dep], ['123']), ([dep], ['123'])] - self.coerce_bad_values = ['BOGUS'] - - self.to_primitive_values = [([dep], [dep])] - self.from_primitive_values = [([dep], [dep])] - - def test_stringify(self): - self.assertEqual('[abc,def]', self.field.stringify(['abc', 'def'])) diff --git a/senlin/tests/unit/objects/test_health_registry.py b/senlin/tests/unit/objects/test_health_registry.py deleted file mode 100644 index 8c3721fb6..000000000 --- a/senlin/tests/unit/objects/test_health_registry.py +++ /dev/null @@ -1,121 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools -from unittest import mock - -from senlin.db import api as db_api -from senlin.objects import base -from senlin.objects import health_registry as hro - - -class TestHealthRegistry(testtools.TestCase): - - def setUp(self): - super(TestHealthRegistry, self).setUp() - self.ctx = mock.Mock() - - @mock.patch.object(base.SenlinObject, '_from_db_object') - @mock.patch.object(db_api, 'registry_create') - def test_create(self, mock_create, mock_from): - x_registry = mock.Mock() - mock_create.return_value = x_registry - x_obj = mock.Mock() - mock_from.return_value = x_obj - - result = hro.HealthRegistry.create( - self.ctx, "FAKE_ID", "FAKE_TYPE", 123, {'foo': 'bar'}, - 'FAKE_ENGINE') - - self.assertEqual(x_obj, result) - mock_create.assert_called_once_with( - self.ctx, "FAKE_ID", "FAKE_TYPE", 123, {'foo': 'bar'}, - "FAKE_ENGINE", enabled=True) - mock_from.assert_called_once_with(self.ctx, mock.ANY, x_registry) - - @mock.patch.object(db_api, 'registry_update') - def test_update(self, mock_update): - hro.HealthRegistry.update(self.ctx, "FAKE_ID", {"foo": "bar"}) - - mock_update.assert_called_once_with( - self.ctx, "FAKE_ID", {"foo": "bar"}) - - @mock.patch.object(base.SenlinObject, '_from_db_object') - @mock.patch.object(db_api, 'registry_claim') - def test_claim(self, mock_claim, mock_from): - x_registry = mock.Mock() - mock_claim.return_value = [x_registry] - x_obj = mock.Mock() - mock_from.side_effect = [x_obj] - - result = hro.HealthRegistry.claim(self.ctx, "FAKE_ENGINE") - - self.assertEqual([x_obj], result) - mock_claim.assert_called_once_with(self.ctx, "FAKE_ENGINE") - mock_from.assert_called_once_with(self.ctx, mock.ANY, x_registry) - - @mock.patch.object(db_api, 'registry_delete') - def test_delete(self, mock_delete): - hro.HealthRegistry.delete(self.ctx, "FAKE_ID") - - mock_delete.assert_called_once_with(self.ctx, "FAKE_ID") - - @mock.patch.object(base.SenlinObject, '_from_db_object') - @mock.patch.object(db_api, 'registry_get') - def test_get(self, mock_get, mock_from): - x_registry = mock.Mock() - x_registry.cluster_id = 'FAKE' - mock_get.return_value = x_registry - - x_obj = mock.Mock() - mock_from.return_value = x_obj - - result = hro.HealthRegistry.get(self.ctx, 'FAKE') - - self.assertEqual(x_obj, result) - mock_get.assert_called_once_with(self.ctx, 'FAKE') - mock_from.assert_called_once_with(self.ctx, mock.ANY, x_registry) - - @mock.patch.object(base.SenlinObject, '_from_db_object') - @mock.patch.object(db_api, 'registry_get_by_param') - def test_get_by_engine(self, mock_get, mock_from): - x_registry = mock.Mock() - x_registry.cluster_id = 'FAKE' - x_registry.engine_id = 'FAKE_ENGINE' - mock_get.return_value = x_registry - - x_obj = mock.Mock() - mock_from.return_value = x_obj - - result = hro.HealthRegistry.get_by_engine( - self.ctx, 'FAKE_ENGINE', 'FAKE') - - self.assertEqual(x_obj, result) - mock_get.assert_called_once_with( - self.ctx, {"cluster_id": "FAKE", "engine_id": "FAKE_ENGINE"}) - mock_from.assert_called_once_with(self.ctx, mock.ANY, x_registry) - - @mock.patch.object(hro.HealthRegistry, 'update') - def test_disable(self, mock_update): - hro.HealthRegistry.disable_registry( - self.ctx, "FAKE_ID") - - mock_update.assert_called_once_with( - self.ctx, "FAKE_ID", {"enabled": False}) - - @mock.patch.object(hro.HealthRegistry, 'update') - def test_enable(self, mock_update): - hro.HealthRegistry.enable_registry( - self.ctx, "FAKE_ID") - - mock_update.assert_called_once_with( - self.ctx, "FAKE_ID", {"enabled": True}) diff --git 
a/senlin/tests/unit/objects/test_node.py b/senlin/tests/unit/objects/test_node.py deleted file mode 100644 index f08587681..000000000 --- a/senlin/tests/unit/objects/test_node.py +++ /dev/null @@ -1,137 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_utils import timeutils -from oslo_utils import uuidutils - -from senlin.common import exception as exc -from senlin.common import utils as common_utils -from senlin.objects import node as no -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestNode(base.SenlinTestCase): - - def setUp(self): - super(TestNode, self).setUp() - self.ctx = utils.dummy_context() - - @mock.patch.object(no.Node, 'get') - def test_find_by_uuid(self, mock_get): - x_node = mock.Mock() - mock_get.return_value = x_node - aid = uuidutils.generate_uuid() - - result = no.Node.find(self.ctx, aid) - - self.assertEqual(x_node, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=True) - - @mock.patch.object(no.Node, 'get_by_name') - @mock.patch.object(no.Node, 'get') - def test_find_by_uuid_as_name(self, mock_get, mock_name): - mock_get.return_value = None - x_node = mock.Mock() - mock_name.return_value = x_node - aid = uuidutils.generate_uuid() - - result = no.Node.find(self.ctx, aid, False) - - self.assertEqual(x_node, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=False) - mock_name.assert_called_once_with(self.ctx, aid, project_safe=False) - - @mock.patch.object(no.Node, 'get_by_name') - def test_find_by_name(self, mock_name): - x_node = mock.Mock() - mock_name.return_value = x_node - aid = 'not-a-uuid' - - result = no.Node.find(self.ctx, aid) - - self.assertEqual(x_node, result) - mock_name.assert_called_once_with(self.ctx, aid, project_safe=True) - - @mock.patch.object(no.Node, 'get_by_short_id') - @mock.patch.object(no.Node, 'get_by_name') - def test_find_by_short_id(self, mock_name, mock_shortid): - mock_name.return_value = None - x_node = mock.Mock() - mock_shortid.return_value = x_node - aid = 'abcdef' - - result = no.Node.find(self.ctx, aid, False) - - self.assertEqual(x_node, result) - mock_name.assert_called_once_with(self.ctx, aid, project_safe=False) - mock_shortid.assert_called_once_with(self.ctx, aid, project_safe=False) - - @mock.patch.object(no.Node, 'get_by_name') - @mock.patch.object(no.Node, 'get_by_short_id') - def test_find_not_found(self, mock_shortid, mock_name): - mock_name.return_value = None - mock_shortid.return_value = None - - ex = self.assertRaises(exc.ResourceNotFound, - no.Node.find, - self.ctx, 'BOGUS') - self.assertEqual("The node 'BOGUS' could not be found.", - str(ex)) - mock_name.assert_called_once_with(self.ctx, 'BOGUS', project_safe=True) - mock_shortid.assert_called_once_with(self.ctx, 'BOGUS', - project_safe=True) - - def test_to_dict(self): - PROFILE_ID = uuidutils.generate_uuid() - CLUSTER_ID = uuidutils.generate_uuid() - values = { - 'name': 'test_node', - 'profile_id': PROFILE_ID, - 'cluster_id': 
CLUSTER_ID, - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'index': -1, - 'init_at': timeutils.utcnow(True), - 'status': 'Initializing', - } - node = no.Node.create(self.ctx, values) - self.assertIsNotNone(node.id) - - expected = { - 'id': node.id, - 'name': node.name, - 'cluster_id': node.cluster_id, - 'physical_id': node.physical_id, - 'profile_id': node.profile_id, - 'user': node.user, - 'project': node.project, - 'domain': node.domain, - 'index': node.index, - 'role': node.role, - 'init_at': common_utils.isotime(node.init_at), - 'created_at': common_utils.isotime(node.created_at), - 'updated_at': common_utils.isotime(node.updated_at), - 'status': node.status, - 'status_reason': node.status_reason, - 'data': node.data, - 'metadata': node.metadata, - 'dependents': node.dependents, - 'profile_name': node.profile_name, - 'tainted': False, - } - - result = no.Node.get(self.ctx, node.id) - dt = result.to_dict() - self.assertEqual(expected, dt) diff --git a/senlin/tests/unit/objects/test_notification.py b/senlin/tests/unit/objects/test_notification.py deleted file mode 100644 index 5a24608d3..000000000 --- a/senlin/tests/unit/objects/test_notification.py +++ /dev/null @@ -1,608 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from unittest import mock - -from oslo_utils import timeutils -from oslo_utils import uuidutils -import testtools - -from senlin.common import consts -from senlin.common import exception -from senlin.engine.actions import base as action_base -from senlin.engine import cluster -from senlin.engine import node -from senlin import objects -from senlin.objects import base as vo_base -from senlin.objects import fields -from senlin.objects import notification as base -from senlin.tests.unit.common import utils - - -@vo_base.SenlinObjectRegistry.register_if(False) -class TestObject(vo_base.SenlinObject): - VERSION = '1.0' - fields = { - 'field_1': fields.StringField(), - 'field_2': fields.IntegerField(), - 'not_important_field': fields.IntegerField(), - } - - -@vo_base.SenlinObjectRegistry.register_if(False) -class TestPayload(base.NotificationObject): - VERSION = '1.0' - - fields = { - 'extra_field': fields.StringField(), - 'field_1': fields.StringField(), - 'field_2': fields.IntegerField(), - } - - -@vo_base.SenlinObjectRegistry.register_if(False) -class TestNotification(base.NotificationBase): - VERSION = '1.0' - fields = { - 'payload': fields.ObjectField('TestPayload') - } - - -@vo_base.SenlinObjectRegistry.register_if(False) -class TestNotificationEmptySchema(base.NotificationBase): - VERSION = '1.0' - fields = { - 'payload': fields.ObjectField('TestPayloadEmptySchema') - } - - -class TestNotificationBase(testtools.TestCase): - - fake_service = { - 'created_at': timeutils.utcnow(True), - 'updated_at': timeutils.utcnow(True), - 'id': uuidutils.generate_uuid(), - 'host': 'fake-host', - 'binary': 'senlin-fake', - 'topic': 'fake-service-topic', - 'disabled': False, - 'disabled_reason': None, - } - - expected_payload = { - 'senlin_object.name': 
'TestPayload', - 'senlin_object.data': { - 'field_1': 'test1', - 'field_2': 42, - 'extra_field': 'test string', - }, - 'senlin_object.version': '1.0', - 'senlin_object.namespace': 'senlin' - } - - def setUp(self): - super(TestNotificationBase, self).setUp() - self.ctx = utils.dummy_context() - self.service_obj = objects.Service(**self.fake_service) - - self.my_obj = TestObject(field_1='test1', field_2=42, - not_important_field=13) - - self.payload = TestPayload(field_1='test1', field_2=42, - extra_field='test string') - - self.notification = TestNotification( - event_type=base.EventType( - object='test_object', - action='update', - phase=consts.PHASE_START), - publisher=base.NotificationPublisher.from_service( - self.service_obj), - priority=consts.PRIO_INFO, - payload=self.payload) - - def _verify_notification(self, mock_notifier, mock_context, - expected_event_type, expected_payload): - mock_notifier.prepare.assert_called_once_with( - publisher_id='senlin-fake:fake-host') - mock_notify = mock_notifier.prepare.return_value.info - self.assertTrue(mock_notify.called) - self.assertEqual(mock_notify.call_args[0][0], mock_context) - self.assertEqual(mock_notify.call_args[0][1], - expected_event_type) - actual_payload = mock_notify.call_args[0][2] - self.assertEqual(expected_payload, actual_payload) - - @mock.patch('senlin.common.messaging.NOTIFIER') - def test_emit_notification(self, mock_notifier): - - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - self.notification.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update.start', - expected_payload=self.expected_payload) - - @mock.patch('senlin.common.messaging.NOTIFIER') - def test_emit_with_host_and_binary_as_publisher(self, mock_notifier): - event_type = base.EventType( - object='test_object', - action='update') - publisher = base.NotificationPublisher(host='fake-host', - binary='senlin-fake') - - noti = TestNotification(event_type=event_type, - publisher=publisher, - priority=consts.PRIO_INFO, - payload=self.payload) - - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - noti.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update', - expected_payload=self.expected_payload) - - @mock.patch('senlin.common.messaging.NOTIFIER') - def test_emit_event_type_without_phase(self, mock_notifier): - noti = TestNotification( - event_type=base.EventType( - object='test_object', - action='update'), - publisher=base.NotificationPublisher.from_service( - self.service_obj), - priority=consts.PRIO_INFO, - payload=self.payload) - - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - noti.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update', - expected_payload=self.expected_payload) - - -class TestExceptionPayload(testtools.TestCase): - - def test_create(self): - ex = base.ExceptionPayload( - module='fake_module', - function='fake_function', - exception='fake_exception', - message='fake_message') - - self.assertEqual('fake_module', ex.module) - self.assertEqual('fake_function', ex.function) - self.assertEqual('fake_exception', ex.exception) - self.assertEqual('fake_message', ex.message) - - def test_create_from_exception(self): - ex = None - pload = None - - try: - {}['key'] - except Exception: - ex = exception.BadRequest(msg="It is really bad.") - pload = 
base.ExceptionPayload.from_exception(ex) - - self.assertIsNotNone(ex) - self.assertIsNotNone(pload) - - # 'senlin.tests.unit.objects.notifications.test_exception', - self.assertEqual(self.__module__, pload.module) - self.assertEqual('test_create_from_exception', pload.function) - self.assertEqual('BadRequest', pload.exception) - self.assertEqual("It is really bad.", pload.message) - - def test_create_from_none(self): - pload = base.ExceptionPayload.from_exception(None) - self.assertIsNone(pload) - - -class TestClusterPayload(testtools.TestCase): - - def setUp(self): - super(TestClusterPayload, self).setUp() - - uuid = uuidutils.generate_uuid() - prof_uuid = uuidutils.generate_uuid() - dt = timeutils.utcnow(True) - self.params = { - 'id': uuid, - 'name': 'fake_name', - 'profile_id': prof_uuid, - 'init_at': dt, - 'created_at': dt, - 'updated_at': dt, - 'min_size': 1, - 'max_size': 10, - 'desired_capacity': 5, - 'timeout': 4, - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'metadata': {'foo': 'bar'}, - 'data': {'key': 'value'}, - 'user': 'user1', - 'project': 'project1', - 'domain': 'domain1', - 'dependents': {'zoo': {'lion', 'deer'}} - } - - def _verify_equality(self, obj, params): - for k, v in params.items(): - self.assertTrue(obj.obj_attr_is_set(k)) - self.assertEqual(v, getattr(obj, k)) - - def test_create(self): - sot = base.ClusterPayload(**self.params) - self._verify_equality(sot, self.params) - - def test_create_with_required_fields(self): - params = { - 'id': uuidutils.generate_uuid(), - 'name': 'fake_name', - 'profile_id': uuidutils.generate_uuid(), - 'init_at': timeutils.utcnow(True), - 'min_size': 1, - 'max_size': 10, - 'desired_capacity': 5, - 'timeout': 4, - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - - sot = base.ClusterPayload(**params) - - self._verify_equality(sot, params) - - def test_create_with_obj(self): - params = copy.deepcopy(self.params) - name = params.pop('name') - desired_capacity = params.pop('desired_capacity') - profile_id = params.pop('profile_id') - c1 = cluster.Cluster(name, desired_capacity, profile_id, **params) - - sot = base.ClusterPayload.from_cluster(c1) - - self._verify_equality(sot, self.params) - - -class TestNodePayload(testtools.TestCase): - - def setUp(self): - super(TestNodePayload, self).setUp() - - uuid = uuidutils.generate_uuid() - prof_uuid = uuidutils.generate_uuid() - cluster_uuid = uuidutils.generate_uuid() - physical_uuid = uuidutils.generate_uuid() - dt = timeutils.utcnow(True) - self.params = { - 'id': uuid, - 'name': 'fake_name', - 'profile_id': prof_uuid, - 'cluster_id': cluster_uuid, - 'physical_id': physical_uuid, - 'index': 3, - 'role': 'master', - 'init_at': dt, - 'created_at': dt, - 'updated_at': dt, - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'metadata': {'foo': 'bar'}, - 'data': {'key': 'value'}, - 'user': 'user1', - 'project': 'project1', - 'domain': 'domain1', - 'dependents': {'zoo': {'lion', 'deer'}} - } - - def _verify_equality(self, obj, params): - for k, v in params.items(): - self.assertTrue(obj.obj_attr_is_set(k)) - self.assertEqual(v, getattr(obj, k)) - - def test_create(self): - sot = base.NodePayload(**self.params) - self._verify_equality(sot, self.params) - - def test_create_with_required_fields(self): - params = { - 'id': uuidutils.generate_uuid(), - 'name': 'fake_name', - 'profile_id': uuidutils.generate_uuid(), - 'cluster_id': '', - 'index': -1, - 'init_at': timeutils.utcnow(True), - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'user': 'user1', 
- 'project': 'project1', - } - - sot = base.NodePayload(**params) - - self._verify_equality(sot, params) - - def test_create_with_obj(self): - params = copy.deepcopy(self.params) - name = params.pop('name') - profile_id = params.pop('profile_id') - n1 = node.Node(name, profile_id, **params) - - sot = base.NodePayload.from_node(n1) - - self._verify_equality(sot, self.params) - - -class TestActionPayload(testtools.TestCase): - - def setUp(self): - super(TestActionPayload, self).setUp() - - uuid = uuidutils.generate_uuid() - target_uuid = uuidutils.generate_uuid() - dt = timeutils.utcnow(True) - self.params = { - 'id': uuid, - 'name': 'fake_name', - 'created_at': dt, - 'target': target_uuid, - 'action': 'CLUSTER_CREATE', - 'start_time': 1.23, - 'end_time': 4.56, - 'timeout': 78, - 'status': 'RUNNING', - 'status_reason': 'Clear', - 'inputs': {'key': 'value'}, - 'outputs': {'foo': 'bar'}, - 'data': {'zoo': 'nar'}, - 'user': 'user1', - 'project': 'project1', - } - - def _verify_equality(self, obj, params): - for k, v in params.items(): - self.assertTrue(obj.obj_attr_is_set(k)) - self.assertEqual(v, getattr(obj, k)) - - def test_create(self): - sot = base.ActionPayload(**self.params) - self._verify_equality(sot, self.params) - - def test_create_with_required_fields(self): - params = { - 'id': uuidutils.generate_uuid(), - 'name': 'fake_name', - 'target': uuidutils.generate_uuid(), - 'action': 'CLUSTER_CREATE', - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - - sot = base.ActionPayload(**params) - - self._verify_equality(sot, params) - - def test_create_with_obj(self): - a1 = objects.Action(**self.params) - - sot = base.ActionPayload.from_action(a1) - - self._verify_equality(sot, self.params) - - -class TestClusterActionPayload(testtools.TestCase): - - def setUp(self): - super(TestClusterActionPayload, self).setUp() - ctx = utils.dummy_context() - cluster_params = { - 'id': uuidutils.generate_uuid(), - 'init_at': timeutils.utcnow(True), - 'min_size': 1, - 'max_size': 10, - 'timeout': 4, - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - self.cluster = cluster.Cluster('CC', 5, uuidutils.generate_uuid(), - **cluster_params) - action_params = { - 'id': uuidutils.generate_uuid(), - 'name': 'fake_name', - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - self.action = action_base.Action(uuidutils.generate_uuid(), - 'CLUSTER_CREATE', ctx, - **action_params) - - def test_create(self): - exobj = None - try: - {}['key'] - except Exception: - ex = exception.InvalidSpec(message='boom') - exobj = base.ExceptionPayload.from_exception(ex) - - sot = base.ClusterActionPayload(cluster=self.cluster, - action=self.action, - exception=exobj) - - self.assertTrue(sot.obj_attr_is_set('cluster')) - self.assertTrue(sot.obj_attr_is_set('action')) - self.assertTrue(sot.obj_attr_is_set('exception')) - self.assertIsNotNone(sot.exception) - - def test_create_with_no_exc(self): - ex = None - sot = base.ClusterActionPayload(cluster=self.cluster, - action=self.action, - exception=ex) - - self.assertTrue(sot.obj_attr_is_set('cluster')) - self.assertTrue(sot.obj_attr_is_set('action')) - self.assertTrue(sot.obj_attr_is_set('exception')) - self.assertIsNone(sot.exception) - - -class TestNodeActionPayload(testtools.TestCase): - - def setUp(self): - super(TestNodeActionPayload, self).setUp() - ctx = utils.dummy_context() - node_params = { - 'id': 
uuidutils.generate_uuid(), - 'cluster_id': '', - 'index': -1, - 'init_at': timeutils.utcnow(True), - 'status': 'ACTIVE', - 'status_reason': 'Good', - } - self.node = node.Node('NN', uuidutils.generate_uuid(), **node_params) - action_params = { - - 'id': uuidutils.generate_uuid(), - 'name': 'fake_name', - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - self.action = action_base.Action(uuidutils.generate_uuid(), - 'NODE_CREATE', ctx, **action_params) - - def test_create(self): - exobj = None - try: - {}['key'] - except Exception: - ex = exception.InvalidSpec(message='boom') - exobj = base.ExceptionPayload.from_exception(ex) - - sot = base.NodeActionPayload(node=self.node, - action=self.action, - exception=exobj) - - self.assertTrue(sot.obj_attr_is_set('node')) - self.assertTrue(sot.obj_attr_is_set('action')) - self.assertTrue(sot.obj_attr_is_set('exception')) - self.assertIsNotNone(sot.exception) - - def test_create_with_no_exc(self): - sot = base.NodeActionPayload(node=self.node, action=self.action) - - self.assertTrue(sot.obj_attr_is_set('node')) - self.assertTrue(sot.obj_attr_is_set('action')) - self.assertTrue(sot.obj_attr_is_set('exception')) - self.assertIsNone(sot.exception) - - -class TestClusterActionNotification(testtools.TestCase): - - def setUp(self): - super(TestClusterActionNotification, self).setUp() - ctx = utils.dummy_context() - cluster_params = { - 'id': uuidutils.generate_uuid(), - 'init_at': timeutils.utcnow(True), - 'min_size': 1, - 'max_size': 10, - 'timeout': 4, - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - self.cluster = cluster.Cluster('CC', 5, uuidutils.generate_uuid(), - **cluster_params) - action_params = { - 'id': uuidutils.generate_uuid(), - 'name': 'fake_name', - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - self.action = action_base.Action(uuidutils.generate_uuid(), - 'CLUSTER_CREATE', ctx, - **action_params) - - def test_create(self): - payload = base.ClusterActionPayload(cluster=self.cluster, - action=self.action) - - sot = base.ClusterActionNotification(payload=payload) - - self.assertTrue(sot.obj_attr_is_set('payload')) - - -class TestNodeActionNotification(testtools.TestCase): - - def setUp(self): - super(TestNodeActionNotification, self).setUp() - ctx = utils.dummy_context() - node_params = { - 'id': uuidutils.generate_uuid(), - 'cluster_id': '', - 'index': -1, - 'init_at': timeutils.utcnow(True), - 'status': 'ACTIVE', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - self.node = node.Node('NN', uuidutils.generate_uuid(), **node_params) - action_params = { - - 'id': uuidutils.generate_uuid(), - 'name': 'fake_name', - 'start_time': 1.23, - 'status': 'RUNNING', - 'status_reason': 'Good', - 'user': 'user1', - 'project': 'project1', - } - self.action = action_base.Action(uuidutils.generate_uuid(), - 'NODE_CREATE', ctx, **action_params) - - def test_create(self): - payload = base.NodeActionPayload(node=self.node, action=self.action) - - sot = base.NodeActionNotification(payload=payload) - - self.assertTrue(sot.obj_attr_is_set('payload')) diff --git a/senlin/tests/unit/objects/test_policy.py b/senlin/tests/unit/objects/test_policy.py deleted file mode 100644 index 4e08d79e2..000000000 --- a/senlin/tests/unit/objects/test_policy.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# 
not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_utils import uuidutils -import testtools - -from senlin.common import exception as exc -from senlin.objects import policy as po - - -class TestPolicy(testtools.TestCase): - - def setUp(self): - super(TestPolicy, self).setUp() - self.ctx = mock.Mock() - - @mock.patch.object(po.Policy, 'get') - def test_find_by_uuid(self, mock_get): - x_policy = mock.Mock() - mock_get.return_value = x_policy - aid = uuidutils.generate_uuid() - - result = po.Policy.find(self.ctx, aid) - - self.assertEqual(x_policy, result) - mock_get.assert_called_once_with(self.ctx, aid) - - @mock.patch.object(po.Policy, 'get_by_name') - @mock.patch.object(po.Policy, 'get') - def test_find_by_uuid_as_name(self, mock_get, mock_get_name): - x_policy = mock.Mock() - mock_get_name.return_value = x_policy - mock_get.return_value = None - aid = uuidutils.generate_uuid() - - result = po.Policy.find(self.ctx, aid, project_safe=False) - - self.assertEqual(x_policy, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=False) - mock_get_name.assert_called_once_with(self.ctx, aid, - project_safe=False) - - @mock.patch.object(po.Policy, 'get_by_name') - def test_find_by_name(self, mock_get_name): - x_policy = mock.Mock() - mock_get_name.return_value = x_policy - - aid = 'this-is-not-uuid' - result = po.Policy.find(self.ctx, aid) - - self.assertEqual(x_policy, result) - mock_get_name.assert_called_once_with(self.ctx, aid) - - @mock.patch.object(po.Policy, 'get_by_short_id') - @mock.patch.object(po.Policy, 'get_by_name') - def test_find_by_shortid(self, mock_get_name, mock_get_shortid): - x_policy = mock.Mock() - mock_get_shortid.return_value = x_policy - mock_get_name.return_value = None - aid = 'abcd-1234-abcd' - - result = po.Policy.find(self.ctx, aid, project_safe=False) - - self.assertEqual(x_policy, result) - mock_get_name.assert_called_once_with(self.ctx, aid, - project_safe=False) - mock_get_shortid.assert_called_once_with(self.ctx, aid, - project_safe=False) - - @mock.patch.object(po.Policy, 'get_by_short_id') - @mock.patch.object(po.Policy, 'get_by_name') - def test_find_not_found(self, mock_get_name, mock_get_shortid): - mock_get_shortid.return_value = None - mock_get_name.return_value = None - - ex = self.assertRaises(exc.ResourceNotFound, - po.Policy.find, - self.ctx, 'Bogus') - - self.assertEqual("The policy 'Bogus' could not be found.", - str(ex)) - mock_get_name.assert_called_once_with(self.ctx, 'Bogus') diff --git a/senlin/tests/unit/objects/test_profile.py b/senlin/tests/unit/objects/test_profile.py deleted file mode 100644 index 24f4bc4f8..000000000 --- a/senlin/tests/unit/objects/test_profile.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_utils import uuidutils -import testtools - -from senlin.common import exception as exc -from senlin.objects import profile as po - - -class TestProfile(testtools.TestCase): - - def setUp(self): - super(TestProfile, self).setUp() - self.ctx = mock.Mock() - - @mock.patch.object(po.Profile, 'get') - def test_find_by_uuid(self, mock_get): - x_profile = mock.Mock() - mock_get.return_value = x_profile - aid = uuidutils.generate_uuid() - - result = po.Profile.find(self.ctx, aid, project_safe=True) - - self.assertEqual(x_profile, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=True) - - @mock.patch.object(po.Profile, 'get_by_name') - @mock.patch.object(po.Profile, 'get') - def test_find_by_uuid_as_name(self, mock_get, mock_get_name): - x_profile = mock.Mock() - mock_get_name.return_value = x_profile - mock_get.return_value = None - aid = uuidutils.generate_uuid() - - result = po.Profile.find(self.ctx, aid, project_safe=False) - - self.assertEqual(x_profile, result) - mock_get.assert_called_once_with(self.ctx, aid, project_safe=False) - mock_get_name.assert_called_once_with(self.ctx, aid, - project_safe=False) - - @mock.patch.object(po.Profile, 'get_by_name') - def test_find_by_name(self, mock_get_name): - x_profile = mock.Mock() - mock_get_name.return_value = x_profile - - aid = 'this-is-not-uuid' - result = po.Profile.find(self.ctx, aid, project_safe=True) - - self.assertEqual(x_profile, result) - mock_get_name.assert_called_once_with(self.ctx, aid, project_safe=True) - - @mock.patch.object(po.Profile, 'get_by_short_id') - @mock.patch.object(po.Profile, 'get_by_name') - def test_find_by_shortid(self, mock_get_name, mock_get_shortid): - x_profile = mock.Mock() - mock_get_shortid.return_value = x_profile - mock_get_name.return_value = None - - aid = 'abcd-1234-abcd' - result = po.Profile.find(self.ctx, aid, project_safe=False) - - self.assertEqual(x_profile, result) - mock_get_name.assert_called_once_with(self.ctx, aid, - project_safe=False) - mock_get_shortid.assert_called_once_with(self.ctx, aid, - project_safe=False) - - @mock.patch.object(po.Profile, 'get_by_short_id') - @mock.patch.object(po.Profile, 'get_by_name') - def test_find_not_found(self, mock_get_name, mock_get_shortid): - mock_get_name.return_value = None - mock_get_shortid.return_value = None - - ex = self.assertRaises(exc.ResourceNotFound, - po.Profile.find, - self.ctx, 'Bogus') - - self.assertEqual("The profile 'Bogus' could not be found.", - str(ex)) - mock_get_name.assert_called_once_with(self.ctx, 'Bogus') diff --git a/senlin/tests/unit/objects/test_receiver.py b/senlin/tests/unit/objects/test_receiver.py deleted file mode 100644 index 152336fb3..000000000 --- a/senlin/tests/unit/objects/test_receiver.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_utils import uuidutils -import testtools - -from senlin.common import exception as exc -from senlin.objects import receiver as ro - - -class ReceiverTest(testtools.TestCase): - - def setUp(self): - super(ReceiverTest, self).setUp() - self.ctx = mock.Mock() - - @mock.patch.object(ro.Receiver, 'get') - def test_find_by_uuid(self, mock_get): - fake_obj = mock.Mock() - mock_get.return_value = fake_obj - fake_id = uuidutils.generate_uuid() - - res = ro.Receiver.find(self.ctx, fake_id) - - self.assertEqual(fake_obj, res) - mock_get.assert_called_once_with(self.ctx, fake_id) - - @mock.patch.object(ro.Receiver, 'get_by_name') - @mock.patch.object(ro.Receiver, 'get') - def test_find_by_uuid_as_name(self, mock_get, mock_get_name): - mock_get.return_value = None - fake_obj = mock.Mock() - mock_get_name.return_value = fake_obj - fake_id = uuidutils.generate_uuid() - - res = ro.Receiver.find(self.ctx, fake_id, project_safe=False) - - self.assertEqual(fake_obj, res) - mock_get.assert_called_once_with(self.ctx, fake_id, project_safe=False) - mock_get_name.assert_called_once_with(self.ctx, fake_id, - project_safe=False) - - @mock.patch.object(ro.Receiver, 'get_by_name') - def test_find_by_name(self, mock_get_name): - fake_obj = mock.Mock() - mock_get_name.return_value = fake_obj - fake_id = 'not-a-uuid' - - res = ro.Receiver.find(self.ctx, fake_id) - - self.assertEqual(fake_obj, res) - mock_get_name.assert_called_once_with(self.ctx, fake_id) - - @mock.patch.object(ro.Receiver, 'get_by_short_id') - @mock.patch.object(ro.Receiver, 'get_by_name') - def test_find_by_short_id(self, mock_get_name, mock_get_shortid): - mock_get_name.return_value = None - fake_obj = mock.Mock() - mock_get_shortid.return_value = fake_obj - fake_id = '12345678' - - res = ro.Receiver.find(self.ctx, fake_id, project_safe=False) - - self.assertEqual(fake_obj, res) - mock_get_name.assert_called_once_with(self.ctx, fake_id, - project_safe=False) - mock_get_shortid.assert_called_once_with(self.ctx, fake_id, - project_safe=False) - - @mock.patch.object(ro.Receiver, 'get_by_short_id') - @mock.patch.object(ro.Receiver, 'get_by_name') - def test_find_not_found(self, mock_get_name, mock_get_shortid): - mock_get_shortid.return_value = None - mock_get_name.return_value = None - fake_id = '12345678' # not a uuid - - self.assertRaises(exc.ResourceNotFound, - ro.Receiver.find, - self.ctx, fake_id) - - mock_get_name.assert_called_once_with(self.ctx, fake_id) diff --git a/senlin/tests/unit/policies/__init__.py b/senlin/tests/unit/policies/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/policies/test_affinity.py b/senlin/tests/unit/policies/test_affinity.py deleted file mode 100644 index 55b79b2c8..000000000 --- a/senlin/tests/unit/policies/test_affinity.py +++ /dev/null @@ -1,853 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from unittest import mock - - -from senlin.common import consts -from senlin.common import context -from senlin.common import exception as exc -from senlin.common import scaleutils -from senlin.objects import cluster_policy as cpo -from senlin.policies import affinity_policy as ap -from senlin.policies import base as pb -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestAffinityPolicy(base.SenlinTestCase): - - def setUp(self): - super(TestAffinityPolicy, self).setUp() - self.context = utils.dummy_context() - self.spec = { - 'type': 'senlin.policy.affinity', - 'version': '1.0', - 'properties': { - 'servergroup': {} - }, - } - - def test_policy_init(self): - policy = ap.AffinityPolicy('test-policy', self.spec) - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.affinity-1.0', policy.type) - self.assertFalse(policy.enable_drs) - self.assertIsNone(policy._novaclient) - - @mock.patch.object(pb.Policy, 'validate') - def test_validate_okay(self, mock_base_validate): - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['availability_zone'] = 'NEWAZ' - policy = ap.AffinityPolicy('test-policy', new_spec) - nc = mock.Mock() - nc.validate_azs.return_value = ['NEWAZ'] - policy._novaclient = nc - ctx = mock.Mock(user='U1', project='P1') - - res = policy.validate(ctx, True) - - self.assertTrue(res) - mock_base_validate.assert_called_once_with(ctx, True) - nc.validate_azs.assert_called_once_with(['NEWAZ']) - - @mock.patch.object(pb.Policy, 'validate') - def test_validate_no_validate_props(self, mock_base_validate): - policy = ap.AffinityPolicy('test-policy', self.spec) - ctx = mock.Mock(user='U1', project='P1') - - res = policy.validate(ctx, False) - - self.assertTrue(res) - mock_base_validate.assert_called_once_with(ctx, False) - - @mock.patch.object(pb.Policy, 'validate') - def test_validate_az_not_specified(self, mock_base_validate): - policy = ap.AffinityPolicy('test-policy', self.spec) - nc = mock.Mock() - policy._novaclient = nc - ctx = mock.Mock(user='U1', project='P1') - - res = policy.validate(ctx, True) - - self.assertTrue(res) - mock_base_validate.assert_called_once_with(ctx, True) - self.assertEqual(0, nc.validate_azs.call_count) - - @mock.patch.object(pb.Policy, 'validate') - def test_validate_az_not_found(self, mock_base_validate): - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['availability_zone'] = 'NEWAZ' - policy = ap.AffinityPolicy('test-policy', new_spec) - nc = mock.Mock() - nc.validate_azs.return_value = [] # this means not found - policy._novaclient = nc - ctx = mock.Mock(user='U1', project='P1') - - ex = self.assertRaises(exc.InvalidSpec, - policy.validate, - ctx, True) - - mock_base_validate.assert_called_once_with(ctx, True) - nc.validate_azs.assert_called_once_with(['NEWAZ']) - self.assertEqual("The specified availability_zone 'NEWAZ' could not " - "be found.", str(ex)) - - def test_attach_using_profile_hints(self): - x_profile = mock.Mock() - x_profile.type = 'os.nova.server-1.0' - x_profile.spec = { - 
'scheduler_hints': { - 'group': 'KONGFOO', - } - } - cluster = mock.Mock(id='CLUSTER_ID', user='UU', project='PP', - rt={'profile': x_profile}) - x_group = mock.Mock(id='GROUP_ID', policies=[u'anti-affinity']) - x_nova = mock.Mock() - x_nova.server_group_find.return_value = x_group - - policy = ap.AffinityPolicy('test-policy', self.spec) - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - x_data = mock.Mock() - mock_build = self.patchobject(policy, '_build_policy_data', - return_value=x_data) - - # do it - res, data = policy.attach(cluster) - - # assertions - self.assertEqual(x_data, data) - self.assertTrue(res) - - mock_nova.assert_called_once_with('UU', 'PP') - x_nova.server_group_find.assert_called_once_with('KONGFOO', True) - mock_build.assert_called_once_with({ - 'servergroup_id': 'GROUP_ID', - 'inherited_group': True - }) - - def test_attach_with_group_found(self): - self.spec['properties']['servergroup']['name'] = 'KONGFU' - x_profile = mock.Mock() - x_profile.type = 'os.nova.server-1.0' - x_profile.spec = {'foo': 'bar'} - cluster = mock.Mock(id='CLUSTER_ID', user='UU', project='PP', - rt={'profile': x_profile}) - x_group = mock.Mock(id='GROUP_ID', policies=['anti-affinity']) - x_nova = mock.Mock() - x_nova.server_group_find.return_value = x_group - - policy = ap.AffinityPolicy('test-policy', self.spec) - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - x_data = mock.Mock() - mock_build = self.patchobject(policy, '_build_policy_data', - return_value=x_data) - - # do it - res, data = policy.attach(cluster) - - # assertions - self.assertTrue(res) - self.assertEqual(x_data, data) - - mock_nova.assert_called_once_with('UU', 'PP') - x_nova.server_group_find.assert_called_once_with('KONGFU', True) - mock_build.assert_called_once_with({ - 'servergroup_id': 'GROUP_ID', - 'inherited_group': True - }) - - def test_attach_with_group_not_found(self): - self.spec['properties']['servergroup']['name'] = 'KONGFU' - x_profile = mock.Mock() - x_profile.spec = {'foo': 'bar'} - x_profile.type = 'os.nova.server-1.0' - cluster = mock.Mock(id='CLUSTER_ID', user='USER', project='PROJ', - rt={'profile': x_profile}) - x_group = mock.Mock(id='GROUP_ID') - x_nova = mock.Mock() - x_nova.server_group_find.return_value = None - x_nova.server_group_create.return_value = x_group - - policy = ap.AffinityPolicy('test-policy', self.spec) - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - x_data = mock.Mock() - mock_build = self.patchobject(policy, '_build_policy_data', - return_value=x_data) - - # do it - res, data = policy.attach(cluster) - - # assertions - self.assertTrue(res) - self.assertEqual(x_data, data) - - mock_nova.assert_called_once_with('USER', 'PROJ') - x_nova.server_group_find.assert_called_once_with('KONGFU', True) - x_nova.server_group_create.assert_called_once_with( - name='KONGFU', - policies=[policy.ANTI_AFFINITY]) - mock_build.assert_called_once_with({ - 'servergroup_id': 'GROUP_ID', - 'inherited_group': False - }) - - def test_attach_with_group_name_not_provided(self): - x_profile = mock.Mock() - x_profile.spec = {'foo': 'bar'} - x_profile.type = 'os.nova.server-1.0' - cluster = mock.Mock(id='CLUSTER_ID', user='USER', project='PROJ', - rt={'profile': x_profile}) - x_group = mock.Mock(id='GROUP_ID') - x_nova = mock.Mock() - x_nova.server_group_create.return_value = x_group - - policy = ap.AffinityPolicy('test-policy', self.spec) - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - x_data = mock.Mock() - mock_build = 
self.patchobject(policy, '_build_policy_data', - return_value=x_data) - - # do it - res, data = policy.attach(cluster) - - # assertions - self.assertTrue(res) - self.assertEqual(x_data, data) - - mock_nova.assert_called_once_with('USER', 'PROJ') - x_nova.server_group_create.assert_called_once_with( - name=mock.ANY, - policies=[policy.ANTI_AFFINITY]) - mock_build.assert_called_once_with({ - 'servergroup_id': 'GROUP_ID', - 'inherited_group': False - }) - - @mock.patch.object(pb.Policy, 'attach') - def test_attach_failed_base_return_false(self, mock_attach): - cluster = mock.Mock() - mock_attach.return_value = (False, 'Something is wrong.') - - policy = ap.AffinityPolicy('test-policy', self.spec) - - res, data = policy.attach(cluster) - - self.assertFalse(res) - self.assertEqual('Something is wrong.', data) - - def test_attach_failed_finding(self): - self.spec['properties']['servergroup']['name'] = 'KONGFU' - x_profile = mock.Mock() - x_profile.type = 'os.nova.server-1.0' - x_profile.spec = {'foo': 'bar'} - cluster = mock.Mock(id='CLUSTER_ID', user='USER', project='PROJ', - rt={'profile': x_profile}) - x_nova = mock.Mock() - err = exc.InternalError(code=500, message='Boom') - x_nova.server_group_find.side_effect = err - - policy = ap.AffinityPolicy('test-policy', self.spec) - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - - # do it - res, data = policy.attach(cluster) - - # assertions - self.assertFalse(res) - self.assertEqual("Failed in retrieving servergroup 'KONGFU'.", data) - - mock_nova.assert_called_once_with('USER', 'PROJ') - x_nova.server_group_find.assert_called_once_with('KONGFU', True) - - def test_attach_policies_not_match(self): - self.spec['properties']['servergroup']['name'] = 'KONGFU' - x_profile = mock.Mock() - x_profile.type = 'os.nova.server-1.0' - x_profile.spec = {'foo': 'bar'} - cluster = mock.Mock(id='CLUSTER_ID', user='U1', project='P1', - rt={'profile': x_profile}) - x_group = mock.Mock(id='GROUP_ID', policies=['affinity']) - x_nova = mock.Mock() - x_nova.server_group_find.return_value = x_group - - policy = ap.AffinityPolicy('test-policy', self.spec) - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - - # do it - res, data = policy.attach(cluster) - - # assertions - self.assertFalse(res) - self.assertEqual("Policies specified (anti-affinity) doesn't match " - "that of the existing servergroup (affinity).", - data) - - mock_nova.assert_called_once_with('U1', 'P1') - x_nova.server_group_find.assert_called_once_with('KONGFU', True) - - def test_attach_failed_creating_server_group(self): - self.spec['properties']['servergroup']['name'] = 'KONGFU' - x_profile = mock.Mock() - x_profile.type = 'os.nova.server-1.0' - x_profile.spec = {'foo': 'bar'} - cluster = mock.Mock(id='CLUSTER_ID', user='U1', project='P1', - rt={'profile': x_profile}) - x_nova = mock.Mock() - x_nova.server_group_find.return_value = None - x_nova.server_group_create.side_effect = Exception() - - policy = ap.AffinityPolicy('test-policy', self.spec) - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - - # do it - res, data = policy.attach(cluster) - - # assertions - self.assertEqual('Failed in creating servergroup.', data) - self.assertFalse(res) - - mock_nova.assert_called_once_with('U1', 'P1') - x_nova.server_group_find.assert_called_once_with('KONGFU', True) - x_nova.server_group_create.assert_called_once_with( - name=mock.ANY, - policies=[policy.ANTI_AFFINITY]) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(context, 
'get_admin_context') - def test_detach_inherited(self, mock_context, mock_cp): - cluster = mock.Mock(id='CLUSTER_ID') - x_ctx = mock.Mock() - mock_context.return_value = x_ctx - x_binding = mock.Mock() - mock_cp.return_value = x_binding - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': True, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - - # do it - res, data = policy.detach(cluster) - - # assertions - self.assertTrue(res) - self.assertEqual('Servergroup resource deletion succeeded.', data) - - mock_context.assert_called_once_with() - mock_cp.assert_called_once_with(x_ctx, 'CLUSTER_ID', 'POLICY_ID') - mock_extract.assert_called_once_with(x_binding.data) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(context, 'get_admin_context') - def test_detach_not_inherited(self, mock_context, mock_cp): - cluster = mock.Mock(id='CLUSTER_ID', user='USER', project='PROJECT') - x_ctx = mock.Mock() - mock_context.return_value = x_ctx - x_binding = mock.Mock() - mock_cp.return_value = x_binding - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - x_nova = mock.Mock() - x_nova.server_group_delete.return_value = None - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - - # do it - res, data = policy.detach(cluster) - - # assertions - self.assertTrue(res) - self.assertEqual('Servergroup resource deletion succeeded.', data) - - mock_context.assert_called_once_with() - mock_cp.assert_called_once_with(x_ctx, 'CLUSTER_ID', 'POLICY_ID') - mock_extract.assert_called_once_with(x_binding.data) - mock_nova.assert_called_once_with('USER', 'PROJECT') - x_nova.server_group_delete.assert_called_once_with('SERVERGROUP_ID') - - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(context, 'get_admin_context') - def test_detach_binding_not_found(self, mock_context, mock_cp): - cluster = mock.Mock(id='CLUSTER_ID') - x_ctx = mock.Mock() - mock_context.return_value = x_ctx - - mock_cp.return_value = None - - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - - # do it - res, data = policy.detach(cluster) - - # assertions - self.assertTrue(res) - self.assertEqual('Servergroup resource deletion succeeded.', data) - - mock_context.assert_called_once_with() - mock_cp.assert_called_once_with(x_ctx, 'CLUSTER_ID', 'POLICY_ID') - - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(context, 'get_admin_context') - def test_detach_binding_data_empty(self, mock_context, mock_cp): - cluster = mock.Mock(id='CLUSTER_ID') - x_ctx = mock.Mock() - mock_context.return_value = x_ctx - x_binding = mock.Mock(data={}) - mock_cp.return_value = x_binding - - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - - # do it - res, data = policy.detach(cluster) - - # assertions - self.assertTrue(res) - self.assertEqual('Servergroup resource deletion succeeded.', data) - - mock_context.assert_called_once_with() - mock_cp.assert_called_once_with(x_ctx, 'CLUSTER_ID', 'POLICY_ID') - - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(context, 'get_admin_context') - def test_detach_policy_data_empty(self, mock_context, mock_cp): - cluster = 
mock.Mock(id='CLUSTER_ID') - x_ctx = mock.Mock() - mock_context.return_value = x_ctx - x_binding = mock.Mock(data={'foo': 'bar'}) - mock_cp.return_value = x_binding - - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=None) - # do it - res, data = policy.detach(cluster) - - # assertions - self.assertTrue(res) - self.assertEqual('Servergroup resource deletion succeeded.', data) - - mock_context.assert_called_once_with() - mock_cp.assert_called_once_with(x_ctx, 'CLUSTER_ID', 'POLICY_ID') - mock_extract.assert_called_once_with({'foo': 'bar'}) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - @mock.patch.object(context, 'get_admin_context') - def test_detach_failing_delete_sg(self, mock_context, mock_cp): - cluster = mock.Mock(id='CLUSTER_ID', user='USER', project='PROJ') - x_ctx = mock.Mock() - mock_context.return_value = x_ctx - x_binding = mock.Mock(data={'foo': 'bar'}) - mock_cp.return_value = x_binding - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - x_nova = mock.Mock() - x_nova.server_group_delete.side_effect = Exception() - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - - # do it - res, data = policy.detach(cluster) - - # assertions - self.assertFalse(res) - self.assertEqual('Failed in deleting servergroup.', data) - - mock_context.assert_called_once_with() - mock_cp.assert_called_once_with(x_ctx, 'CLUSTER_ID', 'POLICY_ID') - mock_extract.assert_called_once_with({'foo': 'bar'}) - mock_nova.assert_called_once_with('USER', 'PROJ') - x_nova.server_group_delete.assert_called_once_with('SERVERGROUP_ID') - - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_pre_op(self, mock_cp): - x_action = mock.Mock() - x_action.data = {'creation': {'count': 2}} - x_binding = mock.Mock() - mock_cp.return_value = x_binding - - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - - # do it - policy.pre_op('CLUSTER_ID', x_action) - - mock_cp.assert_called_once_with(x_action.context, 'CLUSTER_ID', - 'POLICY_ID') - mock_extract.assert_called_once_with(x_binding.data) - self.assertEqual( - { - 'creation': { - 'count': 2 - }, - 'placement': { - 'count': 2, - 'placements': [ - { - 'servergroup': 'SERVERGROUP_ID' - }, - { - 'servergroup': 'SERVERGROUP_ID' - } - ] - } - }, - x_action.data) - x_action.store.assert_called_once_with(x_action.context) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_pre_op_use_scaleout_input(self, mock_cp): - x_action = mock.Mock() - x_action.data = {} - x_action.action = consts.CLUSTER_SCALE_OUT - x_action.inputs = {'count': 2} - x_binding = mock.Mock() - mock_cp.return_value = x_binding - - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - - # do it - policy.pre_op('CLUSTER_ID', x_action) - - mock_cp.assert_called_once_with(x_action.context, 'CLUSTER_ID', - 'POLICY_ID') - 
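# A minimal sketch, assuming this helper shape, of the detach behaviour the
# tests above pin down: the server group is deleted only when the policy
# created it itself (inherited_group is False), and a missing binding or
# empty policy data still counts as success. Messages and data keys are
# copied from the assertions; the function and its client argument are
# illustrative, not the deleted implementation.
def detach_servergroup(policy, binding, nova_client):
    data = None
    if binding is not None and binding.data:
        data = policy._extract_policy_data(binding.data)
    if data and not data['inherited_group']:
        try:
            nova_client.server_group_delete(data['servergroup_id'])
        except Exception:
            return False, 'Failed in deleting servergroup.'
    return True, 'Servergroup resource deletion succeeded.'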
mock_extract.assert_called_once_with(x_binding.data) - self.assertEqual( - { - 'placement': { - 'count': 2, - 'placements': [ - { - 'servergroup': 'SERVERGROUP_ID' - }, - { - 'servergroup': 'SERVERGROUP_ID' - } - ] - } - }, - x_action.data) - x_action.store.assert_called_once_with(x_action.context) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_pre_op_for_node_create(self, mock_cp): - x_action = mock.Mock() - x_action.data = {} - x_action.action = consts.NODE_CREATE - x_binding = mock.Mock() - mock_cp.return_value = x_binding - - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - - # do it - policy.pre_op('CLUSTER_ID', x_action) - - mock_cp.assert_called_once_with(x_action.context, 'CLUSTER_ID', - 'POLICY_ID') - mock_extract.assert_called_once_with(x_binding.data) - self.assertEqual( - { - 'placement': { - 'count': 1, - 'placements': [ - { - 'servergroup': 'SERVERGROUP_ID' - } - ] - } - }, - x_action.data) - x_action.store.assert_called_once_with(x_action.context) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_pre_op_use_resize_params(self, mock_cp): - def fake_parse_func(action, cluster, current): - action.data = { - 'creation': { - 'count': 2 - } - } - - x_action = mock.Mock() - x_action.data = {} - x_action.action = consts.CLUSTER_RESIZE - x_action.inputs = { - 'adjustment_type': consts.EXACT_CAPACITY, - 'number': 4 - } - x_cluster = mock.Mock() - x_cluster.nodes = [mock.Mock(), mock.Mock()] - x_action.entity = x_cluster - mock_parse = self.patchobject(scaleutils, 'parse_resize_params', - side_effect=fake_parse_func) - - x_binding = mock.Mock() - mock_cp.return_value = x_binding - - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - - # do it - policy.pre_op('CLUSTER_ID', x_action) - - mock_parse.assert_called_once_with(x_action, x_cluster, 2) - mock_cp.assert_called_once_with(x_action.context, 'CLUSTER_ID', - 'POLICY_ID') - mock_extract.assert_called_once_with(x_binding.data) - self.assertEqual( - { - 'creation': { - 'count': 2, - }, - 'placement': { - 'count': 2, - 'placements': [ - { - 'servergroup': 'SERVERGROUP_ID' - }, - { - 'servergroup': 'SERVERGROUP_ID' - } - ] - } - }, - x_action.data) - x_action.store.assert_called_once_with(x_action.context) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_pre_op_resize_shrinking(self, mock_cp): - def fake_parse_func(action, cluster, current): - action.data = { - 'deletion': { - 'count': 2 - } - } - - x_action = mock.Mock() - x_action.data = {} - x_action.action = consts.CLUSTER_RESIZE - x_action.inputs = { - 'adjustment_type': consts.EXACT_CAPACITY, - 'number': 10 - } - x_cluster = mock.Mock() - x_cluster.nodes = [mock.Mock(), mock.Mock()] - x_action.entity = x_cluster - mock_parse = self.patchobject(scaleutils, 'parse_resize_params', - side_effect=fake_parse_func) - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data') - - # do it - policy.pre_op('CLUSTER_ID', x_action) - - mock_parse.assert_called_once_with(x_action, x_cluster, 2) - self.assertEqual(0, mock_cp.call_count) - self.assertEqual(0, 
mock_extract.call_count) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_pre_op_with_zone_name(self, mock_cp): - self.spec['properties']['availability_zone'] = 'BLUE_ZONE' - x_action = mock.Mock() - x_action.data = {'creation': {'count': 2}} - x_binding = mock.Mock() - mock_cp.return_value = x_binding - - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - - # do it - policy.pre_op('CLUSTER_ID', x_action) - - mock_cp.assert_called_once_with(x_action.context, 'CLUSTER_ID', - 'POLICY_ID') - mock_extract.assert_called_once_with(x_binding.data) - self.assertEqual( - { - 'creation': { - 'count': 2 - }, - 'placement': { - 'count': 2, - 'placements': [ - { - 'zone': 'BLUE_ZONE', - 'servergroup': 'SERVERGROUP_ID' - }, - { - 'zone': 'BLUE_ZONE', - 'servergroup': 'SERVERGROUP_ID' - } - ] - } - }, - x_action.data) - x_action.store.assert_called_once_with(x_action.context) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_pre_op_with_drs_enabled(self, mock_cp): - self.spec['properties']['enable_drs_extension'] = True - x_action = mock.Mock() - x_action.data = {'creation': {'count': 2}} - x_binding = mock.Mock() - mock_cp.return_value = x_binding - - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - x_cluster = mock.Mock(user='USER', project='PROJ') - x_action.entity = x_cluster - x_nova = mock.Mock() - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - x_hypervisors = [ - mock.Mock(id='HV_1', hypervisor_hostname='host1'), - mock.Mock(id='HV_2', hypervisor_hostname='vsphere_drs1') - ] - x_nova.hypervisor_list.return_value = x_hypervisors - x_hvinfo = { - 'service': { - 'host': 'drshost1' - } - } - x_nova.hypervisor_get.return_value = x_hvinfo - - # do it - policy.pre_op('CLUSTER_ID', x_action) - - mock_cp.assert_called_once_with(x_action.context, 'CLUSTER_ID', - 'POLICY_ID') - mock_extract.assert_called_once_with(x_binding.data) - mock_nova.assert_called_once_with('USER', 'PROJ') - x_nova.hypervisor_list.assert_called_once_with() - x_nova.hypervisor_get.assert_called_once_with('HV_2') - self.assertEqual( - { - 'creation': { - 'count': 2 - }, - 'placement': { - 'count': 2, - 'placements': [ - { - 'zone': 'nova:drshost1', - 'servergroup': 'SERVERGROUP_ID' - }, - { - 'zone': 'nova:drshost1', - 'servergroup': 'SERVERGROUP_ID' - } - ] - } - }, - x_action.data) - x_action.store.assert_called_once_with(x_action.context) - - @mock.patch.object(cpo.ClusterPolicy, 'get') - def test_pre_op_with_drs_enabled_no_match(self, mock_cp): - self.spec['properties']['enable_drs_extension'] = True - x_action = mock.Mock() - x_action.data = {'creation': {'count': 2}} - x_binding = mock.Mock() - mock_cp.return_value = x_binding - - policy_data = { - 'servergroup_id': 'SERVERGROUP_ID', - 'inherited_group': False, - } - policy = ap.AffinityPolicy('test-policy', self.spec) - policy.id = 'POLICY_ID' - mock_extract = self.patchobject(policy, '_extract_policy_data', - return_value=policy_data) - x_cluster = mock.Mock(user='USER', project='PROJ') - x_action.entity = x_cluster - x_nova = mock.Mock() - mock_nova = self.patchobject(policy, 'nova', return_value=x_nova) - 
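# A rough reconstruction, derived only from the assertions in the two DRS
# tests here, of the vSphere DRS lookup: scan the hypervisor list for a
# DRS-style hostname and place nodes on availability zone
# 'nova:<service host>'; when nothing matches, the action data is set to
# ERROR with reason 'No suitable vSphere host is available.'. The function
# name is hypothetical.
def find_drs_zone(nova_client):
    for hv in nova_client.hypervisor_list():
        if 'drs' in hv.hypervisor_hostname:
            details = nova_client.hypervisor_get(hv.id)
            return 'nova:' + details['service']['host']
    return None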
x_hypervisors = [ - mock.Mock(id='HV_1', hypervisor_hostname='host1'), - mock.Mock(id='HV_2', hypervisor_hostname='host2') - ] - x_nova.hypervisor_list.return_value = x_hypervisors - - # do it - policy.pre_op('CLUSTER_ID', x_action) - - mock_cp.assert_called_once_with(x_action.context, 'CLUSTER_ID', - 'POLICY_ID') - mock_extract.assert_called_once_with(x_binding.data) - mock_nova.assert_called_once_with('USER', 'PROJ') - self.assertEqual( - { - 'creation': { - 'count': 2 - }, - 'status': 'ERROR', - 'status_reason': 'No suitable vSphere host is available.' - }, - x_action.data) - x_action.store.assert_called_once_with(x_action.context) diff --git a/senlin/tests/unit/policies/test_batch_policy.py b/senlin/tests/unit/policies/test_batch_policy.py deleted file mode 100644 index 12e4fc2e2..000000000 --- a/senlin/tests/unit/policies/test_batch_policy.py +++ /dev/null @@ -1,183 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import copy -from unittest import mock - -from senlin.policies import batch_policy as bp -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestBatchPolicy(base.SenlinTestCase): - - def setUp(self): - super(TestBatchPolicy, self).setUp() - self.context = utils.dummy_context() - self.spec = { - 'type': 'senlin.policy.batch', - 'version': '1.0', - 'properties': { - 'min_in_service': 1, - 'max_batch_size': 2, - 'pause_time': 60, - } - } - - def test_policy_init(self): - policy = bp.BatchPolicy('test-batch', self.spec) - - self.assertIsNone(policy.id) - self.assertEqual('test-batch', policy.name) - self.assertEqual('senlin.policy.batch-1.0', policy.type) - self.assertEqual(1, policy.min_in_service) - self.assertEqual(2, policy.max_batch_size) - self.assertEqual(60, policy.pause_time) - - def test_get_batch_size(self): - policy = bp.BatchPolicy('test-batch', self.spec) - - size = policy._get_batch_size(5) - - self.assertEqual(2, size) - - def test_get_batch_size_less_than_max(self): - spec = copy.deepcopy(self.spec) - spec['properties']['max_batch_size'] = 3 - policy = bp.BatchPolicy('test-batch', spec) - - size = policy._get_batch_size(3) - - self.assertEqual(2, size) - - def test_get_batch_size_less_than_min(self): - spec = copy.deepcopy(self.spec) - spec['properties']['min_in_service'] = 2 - policy = bp.BatchPolicy('test-batch', spec) - - size = policy._get_batch_size(1) - - self.assertEqual(1, size) - - def test_get_batch_size_with_default_max(self): - spec = copy.deepcopy(self.spec) - spec['properties']['max_batch_size'] = -1 - policy = bp.BatchPolicy('test-batch', spec) - - size = policy._get_batch_size(5) - self.assertEqual(4, size) - - def test_pick_nodes_all_active(self): - node1 = mock.Mock(id='1', status='ACTIVE') - node2 = mock.Mock(id='2', status='ACTIVE') - node3 = mock.Mock(id='3', status='ACTIVE') - nodes = [node1, node2, node3] - policy = bp.BatchPolicy('test-batch', self.spec) - - nodes = policy._pick_nodes(nodes, 2) - - self.assertEqual(2, len(nodes)) - self.assertIn(node1.id, nodes[0]) - self.assertIn(node2.id, 
nodes[0]) - self.assertIn(node3.id, nodes[1]) - - def test_pick_nodes_with_error_nodes(self): - node1 = mock.Mock(id='1', status='ACTIVE', tainted=False) - node2 = mock.Mock(id='2', status='ACTIVE', tainted=False) - node3 = mock.Mock(id='3', status='ERROR', tainted=False) - nodes = [node1, node2, node3] - - policy = bp.BatchPolicy('test-batch', self.spec) - - nodes = policy._pick_nodes(nodes, 2) - - self.assertEqual(2, len(nodes)) - self.assertIn(node3.id, nodes[0]) - self.assertIn(node1.id, nodes[0]) - self.assertIn(node2.id, nodes[1]) - - @mock.patch.object(bp.BatchPolicy, '_pick_nodes') - @mock.patch.object(bp.BatchPolicy, '_get_batch_size') - def test_create_plan_for_update(self, mock_cal, mock_pick): - action = mock.Mock(context=self.context, action='CLUSTER_UPDATE') - cluster = mock.Mock(id='cid') - node1, node2, node3 = mock.Mock(), mock.Mock(), mock.Mock() - cluster.nodes = [node1, node2, node3] - action.entity = cluster - - mock_cal.return_value = 2 - mock_pick.return_value = [{'1', '2'}, {'3'}] - policy = bp.BatchPolicy('test-batch', self.spec) - - res, plan = policy._create_plan(action) - - self.assertTrue(res) - expected_plan = { - 'pause_time': self.spec['properties']['pause_time'], - 'plan': [{'1', '2'}, {'3'}] - } - self.assertEqual(expected_plan, plan) - mock_cal.assert_called_once_with(3) - mock_pick.assert_called_once_with([node1, node2, node3], 2) - - def test_create_plan_for_update_no_node(self): - action = mock.Mock(context=self.context, action='CLUSTER_UPDATE') - cluster = mock.Mock(id='cid') - cluster.nodes = [] - action.entity = cluster - policy = bp.BatchPolicy('test-batch', self.spec) - - res, value = policy._create_plan(action) - - self.assertTrue(res) - expected_plan = { - 'pause_time': self.spec['properties']['pause_time'], - 'plan': [] - } - self.assertEqual(expected_plan, value) - - @mock.patch.object(bp.BatchPolicy, '_create_plan') - def test_pre_op_for_update(self, mock_plan): - action = mock.Mock() - action.context = self.context - action.action = 'CLUSTER_UPDATE' - cluster = mock.Mock(id='cid') - action.entity = cluster - - expected_plan = { - 'pause_time': self.spec['properties']['pause_time'], - 'plan': [{'1', '2'}, {'3'}] - } - mock_plan.return_value = (True, expected_plan) - - policy = bp.BatchPolicy('test-batch', self.spec) - policy.pre_op(cluster.id, action) - - mock_plan.assert_called_once_with(action) - - @mock.patch.object(bp.BatchPolicy, '_create_plan') - def test_pre_op_for_delete(self, mock_plan): - action = mock.Mock() - action.context = self.context - action.action = 'CLUSTER_DELETE' - cluster = mock.Mock(id='cid') - action.entity = cluster - - expected_plan = { - 'pause_time': self.spec['properties']['pause_time'], - 'batch_size': 2, - } - mock_plan.return_value = (True, expected_plan) - - policy = bp.BatchPolicy('test-batch', self.spec) - policy.pre_op(cluster.id, action) - - mock_plan.assert_called_once_with(action) diff --git a/senlin/tests/unit/policies/test_deletion_policy.py b/senlin/tests/unit/policies/test_deletion_policy.py deleted file mode 100644 index 37741e6d9..000000000 --- a/senlin/tests/unit/policies/test_deletion_policy.py +++ /dev/null @@ -1,548 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from unittest import mock - -from senlin.common import consts -from senlin.common import scaleutils as su -from senlin.policies import deletion_policy as dp -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestDeletionPolicy(base.SenlinTestCase): - - def setUp(self): - super(TestDeletionPolicy, self).setUp() - self.context = utils.dummy_context() - self.spec = { - 'type': 'senlin.policy.deletion', - 'version': '1.0', - 'properties': { - 'criteria': 'OLDEST_FIRST', - 'destroy_after_deletion': True, - 'grace_period': 60, - 'reduce_desired_capacity': False - } - } - - def test_policy_init(self): - policy = dp.DeletionPolicy('test-policy', self.spec) - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.deletion-1.0', policy.type) - self.assertEqual('OLDEST_FIRST', policy.criteria) - self.assertTrue(policy.destroy_after_deletion) - self.assertEqual(60, policy.grace_period) - self.assertFalse(policy.reduce_desired_capacity) - - @mock.patch.object(su, 'nodes_by_random') - def test_victims_by_regions_random(self, mock_select): - cluster = mock.Mock() - node1 = mock.Mock(id=1) - node2 = mock.Mock(id=2) - node3 = mock.Mock(id=3) - cluster.nodes_by_region.side_effect = [ - [node1], [node2, node3] - ] - - mock_select.side_effect = [['1'], ['2', '3']] - - self.spec['properties']['criteria'] = 'RANDOM' - policy = dp.DeletionPolicy('test-policy', self.spec) - - res = policy._victims_by_regions(cluster, {'R1': 1, 'R2': 2}) - self.assertEqual(['1', '2', '3'], res) - mock_select.assert_has_calls([ - mock.call([node1], 1), - mock.call([node2, node3], 2) - ]) - cluster.nodes_by_region.assert_has_calls([ - mock.call('R1'), mock.call('R2')]) - - @mock.patch.object(su, 'nodes_by_profile_age') - def test_victims_by_regions_profile_age(self, mock_select): - cluster = mock.Mock() - node1 = mock.Mock(id=1) - node2 = mock.Mock(id=2) - node3 = mock.Mock(id=3) - cluster.nodes_by_region.side_effect = [ - [node1], [node2, node3] - ] - - mock_select.side_effect = [['1'], ['2', '3']] - - self.spec['properties']['criteria'] = 'OLDEST_PROFILE_FIRST' - policy = dp.DeletionPolicy('test-policy', self.spec) - - res = policy._victims_by_regions(cluster, {'R1': 1, 'R2': 2}) - self.assertEqual(['1', '2', '3'], res) - mock_select.assert_has_calls([ - mock.call([node1], 1), - mock.call([node2, node3], 2) - ]) - cluster.nodes_by_region.assert_has_calls([ - mock.call('R1'), mock.call('R2')]) - - @mock.patch.object(su, 'nodes_by_age') - def test_victims_by_regions_age_oldest(self, mock_select): - cluster = mock.Mock() - node1 = mock.Mock(id=1) - node2 = mock.Mock(id=2) - node3 = mock.Mock(id=3) - cluster.nodes_by_region.side_effect = [ - [node1], [node2, node3] - ] - - mock_select.side_effect = [['1'], ['2', '3']] - - self.spec['properties']['criteria'] = 'OLDEST_FIRST' - policy = dp.DeletionPolicy('test-policy', self.spec) - - res = policy._victims_by_regions(cluster, {'R1': 1, 'R2': 2}) - self.assertEqual(['1', '2', '3'], res) - mock_select.assert_has_calls([ - mock.call([node1], 1, True), - 
mock.call([node2, node3], 2, True) - ]) - cluster.nodes_by_region.assert_has_calls([ - mock.call('R1'), mock.call('R2')]) - - @mock.patch.object(su, 'nodes_by_age') - def test_victims_by_regions_age_youngest(self, mock_select): - cluster = mock.Mock() - node1 = mock.Mock(id=1) - node2 = mock.Mock(id=2) - node3 = mock.Mock(id=3) - cluster.nodes_by_region.side_effect = [ - [node1], [node2, node3] - ] - - mock_select.side_effect = [['1'], ['2', '3']] - - self.spec['properties']['criteria'] = 'YOUNGEST_FIRST' - policy = dp.DeletionPolicy('test-policy', self.spec) - - res = policy._victims_by_regions(cluster, {'R1': 1, 'R2': 2}) - self.assertEqual(['1', '2', '3'], res) - mock_select.assert_has_calls([ - mock.call([node1], 1, False), - mock.call([node2, node3], 2, False) - ]) - cluster.nodes_by_region.assert_has_calls([ - mock.call('R1'), mock.call('R2')]) - - @mock.patch.object(su, 'nodes_by_random') - def test_victims_by_zones_random(self, mock_select): - cluster = mock.Mock() - node1 = mock.Mock(id=1) - node2 = mock.Mock(id=2) - node3 = mock.Mock(id=3) - cluster.nodes_by_zone.side_effect = [ - [node1], [node2, node3] - ] - - mock_select.side_effect = [['1'], ['3']] - - self.spec['properties']['criteria'] = 'RANDOM' - policy = dp.DeletionPolicy('test-policy', self.spec) - - res = policy._victims_by_zones(cluster, {'AZ1': 1, 'AZ2': 1}) - self.assertEqual(['1', '3'], res) - mock_select.assert_has_calls([ - mock.call([node1], 1), - mock.call([node2, node3], 1) - ]) - cluster.nodes_by_zone.assert_has_calls( - [mock.call('AZ1'), mock.call('AZ2')], - ) - - @mock.patch.object(su, 'nodes_by_profile_age') - def test_victims_by_zones_profile_age(self, mock_select): - cluster = mock.Mock() - node1 = mock.Mock(id=1) - node2 = mock.Mock(id=2) - node3 = mock.Mock(id=3) - cluster.nodes_by_zone.side_effect = [ - [node1], [node2, node3] - ] - - mock_select.side_effect = [['1'], ['2']] - - self.spec['properties']['criteria'] = 'OLDEST_PROFILE_FIRST' - policy = dp.DeletionPolicy('test-policy', self.spec) - - res = policy._victims_by_zones(cluster, {'AZ1': 1, 'AZ2': 1}) - self.assertEqual(['1', '2'], res) - mock_select.assert_has_calls( - [ - mock.call([node1], 1), - mock.call([node2, node3], 1) - ], - ) - cluster.nodes_by_zone.assert_has_calls( - [mock.call('AZ1'), mock.call('AZ2')], - ) - - @mock.patch.object(su, 'nodes_by_age') - def test_victims_by_zones_age_oldest(self, mock_select): - cluster = mock.Mock() - node1 = mock.Mock(id=1) - node2 = mock.Mock(id=2) - node3 = mock.Mock(id=3) - cluster.nodes_by_zone.side_effect = [ - [node1], [node2, node3] - ] - - mock_select.side_effect = [['1'], ['3']] - - self.spec['properties']['criteria'] = 'OLDEST_FIRST' - policy = dp.DeletionPolicy('test-policy', self.spec) - - res = policy._victims_by_zones(cluster, {'AZ1': 1, 'AZ8': 1}) - self.assertEqual(['1', '3'], res) - mock_select.assert_has_calls([ - mock.call([node1], 1, True), - mock.call([node2, node3], 1, True) - ]) - cluster.nodes_by_zone.assert_has_calls( - [mock.call('AZ1'), mock.call('AZ8')], - ) - - @mock.patch.object(su, 'nodes_by_age') - def test_victims_by_zones_age_youngest(self, mock_select): - cluster = mock.Mock() - node1 = mock.Mock(id=1) - node2 = mock.Mock(id=3) - node3 = mock.Mock(id=5) - cluster.nodes_by_zone.side_effect = [ - [node1], [node2, node3] - ] - - mock_select.side_effect = [['1'], ['3', '5']] - - self.spec['properties']['criteria'] = 'YOUNGEST_FIRST' - policy = dp.DeletionPolicy('test-policy', self.spec) - - res = policy._victims_by_zones(cluster, {'AZ5': 1, 'AZ6': 2}) - 
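# A condensed sketch of the per-zone victim selection these tests drive. The
# su.nodes_by_* selectors are the real helpers patched above; the loop body
# and function signature are assumptions. Each zone's nodes go through the
# selector matching the policy criteria, and the partial victim lists are
# concatenated in iteration order.
from senlin.common import scaleutils as su

def victims_by_zones(cluster, zone_counts, criteria):
    victims = []
    for zone, count in zone_counts.items():
        nodes = cluster.nodes_by_zone(zone)
        if criteria == 'RANDOM':
            victims.extend(su.nodes_by_random(nodes, count))
        elif criteria == 'OLDEST_PROFILE_FIRST':
            victims.extend(su.nodes_by_profile_age(nodes, count))
        else:
            # OLDEST_FIRST passes True, YOUNGEST_FIRST passes False
            victims.extend(su.nodes_by_age(nodes, count,
                                           criteria == 'OLDEST_FIRST'))
    return victims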
self.assertEqual(['1', '3', '5'], res) - mock_select.assert_has_calls( - [ - mock.call([node1], 1, False), - mock.call([node2, node3], 2, False) - ], - ) - cluster.nodes_by_zone.assert_has_calls( - [mock.call('AZ5'), mock.call('AZ6')], - ) - - def test_update_action_clean(self): - action = mock.Mock() - action.data = {} - - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy._update_action(action, ['N1', 'N2']) - - pd = { - 'status': 'OK', - 'reason': 'Candidates generated', - 'deletion': { - 'count': 2, - 'candidates': ['N1', 'N2'], - 'destroy_after_deletion': True, - 'grace_period': 60, - 'reduce_desired_capacity': False, - } - } - self.assertEqual(pd, action.data) - action.store.assert_called_with(action.context) - - def test_update_action_override(self): - action = mock.Mock() - action.data = { - 'deletion': { - 'count': 3, - } - } - - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy._update_action(action, ['N1', 'N2']) - - pd = { - 'status': 'OK', - 'reason': 'Candidates generated', - 'deletion': { - 'count': 2, - 'candidates': ['N1', 'N2'], - 'destroy_after_deletion': True, - 'grace_period': 60, - 'reduce_desired_capacity': False, - } - } - self.assertEqual(pd, action.data) - action.store.assert_called_with(action.context) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - def test_pre_op_del_nodes(self, mock_update): - action = mock.Mock() - action.context = self.context - action.inputs = { - 'count': 2, - 'candidates': ['N1', 'N2'], - } - action.data = {} - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - mock_update.assert_called_once_with(action, ['N1', 'N2']) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - def test_pre_op_node_delete(self, mock_update): - action = mock.Mock(action=consts.NODE_DELETE, context=self.context, - inputs={}, data={}, entity=mock.Mock(id='NODE_ID')) - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - mock_update.assert_called_once_with(action, ['NODE_ID']) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'nodes_by_age') - def test_pre_op_with_count_decisions(self, mock_select, mock_update): - action = mock.Mock(context=self.context, inputs={}, - data={'deletion': {'count': 2}}) - cluster = mock.Mock(nodes=['a', 'b', 'c']) - action.entity = cluster - mock_select.return_value = ['NODE1', 'NODE2'] - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - mock_update.assert_called_once_with(action, ['NODE1', 'NODE2']) - mock_select.assert_called_once_with(cluster.nodes, 2, True) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(dp.DeletionPolicy, '_victims_by_regions') - def test_pre_op_with_region_decisions(self, mock_select, mock_update): - action = mock.Mock(context=self.context, inputs={}) - action.data = { - 'deletion': { - 'count': 2, - 'regions': { - 'R1': 1, - 'R2': 1 - } - } - } - cluster = mock.Mock(nodes=['a', 'b', 'c']) - action.entity = cluster - mock_select.return_value = ['NODE1', 'NODE2'] - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - mock_update.assert_called_once_with(action, ['NODE1', 'NODE2']) - mock_select.assert_called_once_with(cluster, {'R1': 1, 'R2': 1}) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(dp.DeletionPolicy, '_victims_by_zones') - def test_pre_op_with_zone_decisions(self, mock_select, mock_update): 
- action = mock.Mock(context=self.context, inputs={}) - action.data = { - 'deletion': { - 'count': 2, - 'zones': { - 'AZ1': 1, - 'AZ2': 1 - } - } - } - cluster = mock.Mock(nodes=['a', 'b', 'c']) - action.entity = cluster - mock_select.return_value = ['NODE1', 'NODE2'] - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - mock_update.assert_called_once_with(action, ['NODE1', 'NODE2']) - mock_select.assert_called_once_with(cluster, {'AZ1': 1, 'AZ2': 1}) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'nodes_by_age') - def test_pre_op_scale_in_with_count(self, mock_select, mock_update): - action = mock.Mock(context=self.context, data={}, inputs={'count': 2}, - action=consts.CLUSTER_SCALE_IN) - cluster = mock.Mock(nodes=[mock.Mock()]) - action.entity = cluster - mock_select.return_value = ['NODE_ID'] - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - mock_update.assert_called_once_with(action, ['NODE_ID']) - # the following was invoked with 1 because the input count is - # greater than the cluster size - mock_select.assert_called_once_with(cluster.nodes, 1, True) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'nodes_by_age') - def test_pre_op_scale_in_without_count(self, mock_select, mock_update): - action = mock.Mock(context=self.context, data={}, inputs={}, - action=consts.CLUSTER_SCALE_IN) - cluster = mock.Mock(nodes=[mock.Mock()]) - action.entity = cluster - mock_select.return_value = ['NODE_ID'] - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - mock_update.assert_called_once_with(action, ['NODE_ID']) - # the following was invoked with 1 because the input count is - # not specified so 1 becomes the default - mock_select.assert_called_once_with(cluster.nodes, 1, True) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'parse_resize_params') - def test_pre_op_resize_failed_parse(self, mock_parse, mock_update): - action = mock.Mock(context=self.context, inputs={}, data={}, - action=consts.CLUSTER_RESIZE) - cluster = mock.Mock(nodes=[mock.Mock(), mock.Mock()]) - action.entity = cluster - mock_parse.return_value = 'ERROR', 'Failed parsing.' 
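# Minimal sketch, assuming a helper of this shape, of the count resolution the
# two scale-in tests above rely on: an explicit input count is capped at the
# current cluster size, and a missing count defaults to 1 before the
# criteria-based selector runs.
def resolve_scale_in_count(action, cluster_size):
    count = action.inputs.get('count', 1)
    return min(count, cluster_size)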
- policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - self.assertEqual('ERROR', action.data['status']) - self.assertEqual('Failed parsing.', action.data['reason']) - mock_parse.assert_called_once_with(action, cluster, 2) - self.assertEqual(0, mock_update.call_count) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'parse_resize_params') - def test_pre_op_resize_not_deletion(self, mock_parse, mock_update): - def fake_parse(action, cluster, current): - action.data = {} - return 'OK', 'cool' - - action = mock.Mock(context=self.context, inputs={}, - action=consts.CLUSTER_RESIZE) - cluster = mock.Mock(nodes=[mock.Mock(), mock.Mock()]) - action.entity = cluster - mock_parse.side_effect = fake_parse - policy = dp.DeletionPolicy('test-policy', self.spec) - # a simulation of a non-deletion RESIZE - action.data = {} - - policy.pre_op('FAKE_ID', action) - - mock_parse.assert_called_once_with(action, cluster, 2) - self.assertEqual(0, mock_update.call_count) - - @mock.patch.object(su, 'parse_resize_params') - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'nodes_by_age') - def test_pre_op_resize_with_count(self, mock_select, mock_update, - mock_parse): - def fake_parse(a, cluster, current): - a.data = { - 'deletion': { - 'count': 2 - } - } - return 'OK', 'cool' - - action = mock.Mock(context=self.context, inputs={}, data={}, - action=consts.CLUSTER_RESIZE) - cluster = mock.Mock(nodes=[mock.Mock(), mock.Mock()]) - action.entity = cluster - mock_parse.side_effect = fake_parse - mock_select.return_value = ['NID'] - policy = dp.DeletionPolicy('test-policy', self.spec) - - policy.pre_op('FAKE_ID', action) - - mock_parse.assert_called_once_with(action, cluster, 2) - mock_update.assert_called_once_with(action, ['NID']) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'nodes_by_random') - def test_pre_op_do_random(self, mock_select, mock_update): - action = mock.Mock(context=self.context, inputs={}, - data={'deletion': {'count': 2}}) - cluster = mock.Mock(nodes=['a', 'b', 'c']) - action.entity = cluster - mock_select.return_value = ['NODE1', 'NODE2'] - spec = copy.deepcopy(self.spec) - spec['properties']['criteria'] = 'RANDOM' - policy = dp.DeletionPolicy('test-policy', spec) - - policy.pre_op('FAKE_ID', action) - - mock_select.assert_called_once_with(cluster.nodes, 2) - mock_update.assert_called_once_with(action, ['NODE1', 'NODE2']) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'nodes_by_profile_age') - def test_pre_op_do_oldest_profile(self, mock_select, mock_update): - action = mock.Mock(context=self.context, inputs={}, - data={'deletion': {'count': 2}}) - mock_select.return_value = ['NODE1', 'NODE2'] - cluster = mock.Mock(nodes=['a', 'b', 'c']) - action.entity = cluster - spec = copy.deepcopy(self.spec) - spec['properties']['criteria'] = 'OLDEST_PROFILE_FIRST' - policy = dp.DeletionPolicy('test-policy', spec) - - policy.pre_op('FAKE_ID', action) - - mock_select.assert_called_once_with(cluster.nodes, 2) - mock_update.assert_called_once_with(action, ['NODE1', 'NODE2']) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'nodes_by_age') - def test_pre_op_do_oldest_first(self, mock_select, mock_update): - action = mock.Mock(context=self.context, inputs={}, - data={'deletion': {'count': 2}}) - cluster = mock.Mock(nodes=['a', 'b', 'c']) - action.entity = cluster - mock_select.return_value = 
['NODE1', 'NODE2'] - spec = copy.deepcopy(self.spec) - spec['properties']['criteria'] = 'OLDEST_FIRST' - policy = dp.DeletionPolicy('test-policy', spec) - - policy.pre_op('FAKE_ID', action) - - mock_select.assert_called_once_with(cluster.nodes, 2, True) - mock_update.assert_called_once_with(action, ['NODE1', 'NODE2']) - - @mock.patch.object(dp.DeletionPolicy, '_update_action') - @mock.patch.object(su, 'nodes_by_age') - def test_pre_op_do_youngest_first(self, mock_select, mock_update): - action = mock.Mock(context=self.context, inputs={}, - data={'deletion': {'count': 2}}) - cluster = mock.Mock(nodes=['a', 'b', 'c']) - action.entity = cluster - mock_select.return_value = ['NODE1', 'NODE2'] - spec = copy.deepcopy(self.spec) - spec['properties']['criteria'] = 'YOUNGEST_FIRST' - policy = dp.DeletionPolicy('test-policy', spec) - - policy.pre_op('FAKE_ID', action) - - mock_select.assert_called_once_with(cluster.nodes, 2, False) - mock_update.assert_called_once_with(action, ['NODE1', 'NODE2']) diff --git a/senlin/tests/unit/policies/test_health_policy.py b/senlin/tests/unit/policies/test_health_policy.py deleted file mode 100644 index 7c4008cdb..000000000 --- a/senlin/tests/unit/policies/test_health_policy.py +++ /dev/null @@ -1,529 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
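# A sketch of the payload _update_action is asserted to write in the deletion
# policy tests that end here; the key names and messages are copied from the
# assertions, while the function body itself is an assumption. Any
# pre-existing 'deletion' entry (e.g. a stale count) is overwritten with the
# freshly generated candidates.
def update_action(action, victims, policy):
    action.data.update({
        'status': 'OK',
        'reason': 'Candidates generated',
        'deletion': {
            'count': len(victims),
            'candidates': victims,
            'destroy_after_deletion': policy.destroy_after_deletion,
            'grace_period': policy.grace_period,
            'reduce_desired_capacity': policy.reduce_desired_capacity,
        },
    })
    action.store(action.context)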
- -from collections import namedtuple -import copy -from unittest import mock - - -from oslo_config import cfg - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common import scaleutils as su -from senlin.engine import health_manager -from senlin.policies import base as pb -from senlin.policies import health_policy -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestHealthPolicy(base.SenlinTestCase): - - def setUp(self): - super(TestHealthPolicy, self).setUp() - self.context = utils.dummy_context() - - self.spec = { - 'type': 'senlin.policy.health', - 'version': '1.2', - 'properties': { - 'detection': { - "detection_modes": [ - { - 'type': 'NODE_STATUS_POLLING' - }, - ], - 'interval': 60 - }, - 'recovery': { - 'fencing': ['COMPUTE'], - 'actions': [ - {'name': 'REBUILD'} - ] - } - } - } - - fake_profile = mock.Mock(type_name='os.nova.server', - type='os.nova.server-1.0',) - fake_node = mock.Mock(status='ACTIVE') - fake_cluster = mock.Mock(id='CLUSTER_ID', nodes=[fake_node], - rt={'profile': fake_profile}) - self.cluster = fake_cluster - self.patch('senlin.rpc.client.get_engine_client') - self.hp = health_policy.HealthPolicy('test-policy', self.spec) - - def test_policy_init(self): - DetectionMode = namedtuple( - 'DetectionMode', - [self.hp.DETECTION_TYPE] + list(self.hp._DETECTION_OPTIONS)) - - detection_modes = [ - DetectionMode( - type='NODE_STATUS_POLLING', - poll_url='', - poll_url_ssl_verify=True, - poll_url_conn_error_as_unhealthy=True, - poll_url_healthy_response='', - poll_url_retry_limit='', - poll_url_retry_interval='' - ) - ] - - spec = { - 'type': 'senlin.policy.health', - 'version': '1.2', - 'properties': { - 'detection': { - "detection_modes": [ - { - 'type': 'NODE_STATUS_POLLING' - }, - ], - 'interval': 60 - }, - 'recovery': { - 'fencing': ['COMPUTE'], - 'actions': [ - {'name': 'REBUILD'} - ] - } - } - } - - hp = health_policy.HealthPolicy('test-policy', spec) - - self.assertIsNone(hp.id) - self.assertEqual('test-policy', hp.name) - self.assertEqual('senlin.policy.health-1.2', hp.type) - self.assertEqual(detection_modes, hp.detection_modes) - self.assertEqual(60, hp.interval) - self.assertEqual([{'name': 'REBUILD', 'params': None}], - hp.recover_actions) - - def test_policy_init_ops(self): - spec = { - 'type': 'senlin.policy.health', - 'version': '1.2', - 'properties': { - 'detection': { - "detection_modes": [ - { - 'type': 'NODE_STATUS_POLLING' - }, - { - 'type': 'HYPERVISOR_STATUS_POLLING' - }, - { - 'type': 'NODE_STATUS_POLL_URL' - }, - ], - 'interval': 60 - }, - 'recovery': { - 'fencing': ['COMPUTE'], - 'actions': [ - {'name': 'REBUILD'} - ] - } - } - } - - operations = [None, 'ALL_FAILED', 'ANY_FAILED'] - for op in operations: - # set operation in spec - if op: - spec['properties']['detection']['recovery_conditional'] = op - - # test __init__ - hp = health_policy.HealthPolicy('test-policy', spec) - - # check result - self.assertIsNone(hp.id) - self.assertEqual('test-policy', hp.name) - self.assertEqual('senlin.policy.health-1.2', hp.type) - self.assertEqual(60, hp.interval) - self.assertEqual([{'name': 'REBUILD', 'params': None}], - hp.recover_actions) - - def test_validate(self): - spec = copy.deepcopy(self.spec) - spec["properties"]["recovery"]["actions"] = [ - {"name": "REBUILD"}, {"name": "RECREATE"} - ] - self.hp = health_policy.HealthPolicy('test-policy', spec) - - ex = self.assertRaises(exc.ESchema, - self.hp.validate, - self.context) - - self.assertEqual("Only one 
'actions' is supported for now.", - str(ex)) - - def test_validate_valid_interval(self): - spec = copy.deepcopy(self.spec) - spec["properties"]["detection"]["interval"] = 20 - self.hp = health_policy.HealthPolicy('test-policy', spec) - - cfg.CONF.set_override('health_check_interval_min', 20) - - self.hp.validate(self.context) - - def test_validate_invalid_interval(self): - spec = copy.deepcopy(self.spec) - spec["properties"]["detection"]["interval"] = 10 - self.hp = health_policy.HealthPolicy('test-policy', spec) - - cfg.CONF.set_override('health_check_interval_min', 20) - - ex = self.assertRaises(exc.InvalidSpec, - self.hp.validate, - self.context) - - expected_error = ("Specified interval of %(interval)d seconds has to " - "be larger than health_check_interval_min of " - "%(min_interval)d seconds set in configuration." - ) % {"interval": 10, "min_interval": 20} - self.assertEqual(expected_error, str(ex)) - - @mock.patch.object(health_manager, 'register') - def test_attach(self, mock_hm_reg): - - policy_data = { - 'HealthPolicy': { - 'data': { - 'interval': self.hp.interval, - 'detection_modes': [ - { - 'type': 'NODE_STATUS_POLLING', - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': '', - 'poll_url_retry_interval': '' - } - ], - 'node_update_timeout': 300, - 'node_delete_timeout': 20, - 'node_force_recreate': False, - 'recovery_conditional': 'ANY_FAILED' - }, - 'version': '1.2' - } - } - - res, data = self.hp.attach(self.cluster) - self.assertTrue(res) - self.assertEqual(policy_data, data) - kwargs = { - 'interval': self.hp.interval, - 'node_update_timeout': 300, - 'params': { - 'recover_action': self.hp.recover_actions, - 'node_delete_timeout': 20, - 'node_force_recreate': False, - 'recovery_conditional': 'ANY_FAILED', - 'detection_modes': [ - { - 'type': 'NODE_STATUS_POLLING', - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_conn_error_as_unhealthy': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': '', - 'poll_url_retry_interval': '' - } - ], - }, - 'enabled': True - } - mock_hm_reg.assert_called_once_with('CLUSTER_ID', - engine_id=None, - **kwargs) - - @mock.patch.object(health_manager, 'register') - def test_attach_failed_action_matching_rebuild(self, mock_hm_reg): - - fake_profile = mock.Mock(type_name='os.heat.stack-1.0', - type='os.heat.stack') - fake_cluster = mock.Mock(id='CLUSTER_ID', rt={'profile': fake_profile}) - - res, data = self.hp.attach(fake_cluster) - - self.assertFalse(res) - self.assertEqual("Recovery action REBUILD is only applicable to " - "os.nova.server clusters.", data) - - @mock.patch.object(health_manager, 'register') - def test_attach_failed_action_matching_reboot(self, mock_hm_reg): - spec = copy.deepcopy(self.spec) - spec['properties']['recovery']['actions'] = [{'name': 'REBOOT'}] - hp = health_policy.HealthPolicy('test-policy-1', spec) - - fake_profile = mock.Mock(type_name='os.heat.stack-1.0', - type='os.heat.stack') - fake_cluster = mock.Mock(id='CLUSTER_ID', rt={'profile': fake_profile}) - - res, data = hp.attach(fake_cluster) - - self.assertFalse(res) - self.assertEqual("Recovery action REBOOT is only applicable to " - "os.nova.server clusters.", data) - - @mock.patch.object(health_manager, 'register') - def test_attach_failed_with_notify_timeout(self, mock_hm_reg): - mock_hm_reg.return_value = False - res, data = self.hp.attach(self.cluster) - self.assertFalse(res) - self.assertEqual("Registering health manager for 
cluster timed " - "out.", data) - - @mock.patch.object(health_manager, 'unregister') - def test_detach(self, mock_hm_unreg): - res, data = self.hp.detach(self.cluster) - self.assertTrue(res) - self.assertEqual('', data) - mock_hm_unreg.assert_called_once_with('CLUSTER_ID') - - @mock.patch.object(health_manager, 'unregister') - def test_detach_failed_with_notify_timeout(self, mock_hm_unreg): - mock_hm_unreg.return_value = False - res, data = self.hp.detach(self.cluster) - self.assertFalse(res) - self.assertEqual("Unregistering health manager for cluster timed " - "out.", data) - mock_hm_unreg.assert_called_once_with('CLUSTER_ID') - - def test_pre_op_default(self): - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_SCALE_OUT) - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - data = { - 'health': { - 'recover_action': [{'name': 'REBUILD', 'params': None}], - 'fencing': ['COMPUTE'], - } - } - self.assertEqual(data, action.data) - - @mock.patch.object(health_manager, 'disable') - def test_pre_op_scale_in(self, mock_disable): - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_SCALE_IN) - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - mock_disable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'disable') - def test_pre_op_update(self, mock_disable): - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_UPDATE) - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - mock_disable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'disable') - def test_pre_op_cluster_recover(self, mock_disable): - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_RECOVER) - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - mock_disable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'disable') - def test_pre_op_cluster_replace_nodes(self, mock_disable): - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_REPLACE_NODES) - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - mock_disable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'disable') - def test_pre_op_cluster_del_nodes(self, mock_disable): - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_DEL_NODES) - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - mock_disable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'disable') - def test_pre_op_node_delete(self, mock_disable): - action = mock.Mock(context='action_context', data={}, - action=consts.NODE_DELETE) - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - mock_disable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'disable') - def test_pre_op_resize_with_data(self, mock_disable): - action = mock.Mock(context='action_context', data={'deletion': 'foo'}, - action=consts.CLUSTER_RESIZE) - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - mock_disable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(su, 'parse_resize_params') - @mock.patch.object(health_manager, 'disable') - def test_pre_op_resize_without_data(self, mock_disable, mock_parse): - def fake_check(action, cluster, current): - action.data['deletion'] = {'foo': 'bar'} - return 
pb.CHECK_OK, 'good' - - x_cluster = mock.Mock() - x_cluster.nodes = [mock.Mock(), mock.Mock(), mock.Mock()] - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_RESIZE) - action.entity = x_cluster - mock_parse.side_effect = fake_check - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertTrue(res) - mock_disable.assert_called_once_with(self.cluster.id) - mock_parse.assert_called_once_with(action, x_cluster, 3) - - @mock.patch.object(su, 'parse_resize_params') - @mock.patch.object(health_manager, 'disable') - def test_pre_op_resize_parse_error(self, mock_disable, mock_parse): - x_cluster = mock.Mock() - x_cluster.nodes = [mock.Mock(), mock.Mock()] - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_RESIZE) - action.entity = x_cluster - mock_parse.return_value = pb.CHECK_ERROR, 'no good' - - res = self.hp.pre_op(self.cluster.id, action) - - self.assertFalse(res) - self.assertEqual(pb.CHECK_ERROR, action.data['status']) - self.assertEqual('no good', action.data['reason']) - mock_parse.assert_called_once_with(action, x_cluster, 2) - self.assertEqual(0, mock_disable.call_count) - - def test_post_op_default(self): - action = mock.Mock(action='FAKE_ACTION') - - res = self.hp.post_op(self.cluster.id, action) - - self.assertTrue(res) - - @mock.patch.object(health_manager, 'enable') - def test_post_op_scale_in(self, mock_enable): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN) - - res = self.hp.post_op(self.cluster.id, action) - - self.assertTrue(res) - mock_enable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'enable') - def test_post_op_update(self, mock_enable): - action = mock.Mock(action=consts.CLUSTER_UPDATE) - - res = self.hp.post_op(self.cluster.id, action) - - self.assertTrue(res) - mock_enable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'enable') - def test_post_op_cluster_recover(self, mock_enable): - action = mock.Mock(action=consts.CLUSTER_RECOVER) - - res = self.hp.post_op(self.cluster.id, action) - - self.assertTrue(res) - mock_enable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'enable') - def test_post_op_cluster_replace_nodes(self, mock_enable): - action = mock.Mock(action=consts.CLUSTER_REPLACE_NODES) - - res = self.hp.post_op(self.cluster.id, action) - - self.assertTrue(res) - mock_enable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'enable') - def test_post_op_cluster_del_nodes(self, mock_enable): - action = mock.Mock(action=consts.CLUSTER_DEL_NODES) - - res = self.hp.post_op(self.cluster.id, action) - - self.assertTrue(res) - mock_enable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(health_manager, 'enable') - def test_post_op_node_delete(self, mock_enable): - action = mock.Mock(action=consts.NODE_DELETE) - - res = self.hp.post_op(self.cluster.id, action) - - self.assertTrue(res) - mock_enable.assert_called_once_with(self.cluster.id) - - @mock.patch.object(su, 'parse_resize_params') - @mock.patch.object(health_manager, 'enable') - def test_post_op_resize_without_data(self, mock_enable, mock_parse): - def fake_check(action, cluster, current): - action.data['deletion'] = {'foo': 'bar'} - return pb.CHECK_OK, 'good' - - x_cluster = mock.Mock() - x_cluster.nodes = [mock.Mock(), mock.Mock()] - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_RESIZE) - action.entity = x_cluster - mock_parse.side_effect = fake_check - - res = 
self.hp.post_op(self.cluster.id, action) - - self.assertTrue(res) - mock_enable.assert_called_once_with(self.cluster.id) - mock_parse.assert_called_once_with(action, x_cluster, 2) - - @mock.patch.object(su, 'parse_resize_params') - @mock.patch.object(health_manager, 'enable') - def test_post_op_resize_parse_error(self, mock_enable, mock_parse): - x_cluster = mock.Mock() - x_cluster.nodes = [mock.Mock()] - action = mock.Mock(context='action_context', data={}, - action=consts.CLUSTER_RESIZE) - action.entity = x_cluster - mock_parse.return_value = pb.CHECK_ERROR, 'no good' - - res = self.hp.post_op(self.cluster.id, action) - - self.assertFalse(res) - self.assertEqual(pb.CHECK_ERROR, action.data['status']) - self.assertEqual('no good', action.data['reason']) - - mock_parse.assert_called_once_with(action, x_cluster, 1) - self.assertEqual(0, mock_enable.call_count) diff --git a/senlin/tests/unit/policies/test_lb_policy.py b/senlin/tests/unit/policies/test_lb_policy.py deleted file mode 100644 index b2911661b..000000000 --- a/senlin/tests/unit/policies/test_lb_policy.py +++ /dev/null @@ -1,1583 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from unittest import mock - -from oslo_context import context as oslo_context - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common import scaleutils -from senlin.drivers import base as driver_base -from senlin.engine import cluster_policy -from senlin.objects import cluster as co -from senlin.objects import node as no -from senlin.policies import base as policy_base -from senlin.policies import lb_policy -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestLoadBalancingPolicy(base.SenlinTestCase): - - def setUp(self): - super(TestLoadBalancingPolicy, self).setUp() - self.context = utils.dummy_context() - self.spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'pool': { - 'id': '', - 'protocol': 'HTTP', - 'protocol_port': 80, - 'subnet': 'internal-subnet', - 'lb_method': 'ROUND_ROBIN', - 'admin_state_up': True, - 'session_persistence': { - 'type': 'SOURCE_IP', - 'cookie_name': 'whatever' - } - }, - 'vip': { - 'address': '192.168.1.100', - 'subnet': 'external-subnet', - 'network': 'external-network', - 'connection_limit': 500, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'admin_state_up': True, - }, - 'health_monitor': { - 'type': 'HTTP', - 'delay': 10, - 'timeout': 5, - 'max_retries': 3, - 'admin_state_up': True, - 'http_method': 'GET', - 'url_path': '/index.html', - 'expected_codes': '200,201,202' - }, - 'lb_status_timeout': 300, - 'availability_zone': 'test_az', - 'flavor_id': 'test_flavor_id' - } - } - self.sd = mock.Mock() - self.patchobject(driver_base, 'SenlinDriver', return_value=self.sd) - self.lb_driver = mock.Mock() - self.net_driver = mock.Mock() - self.octavia_driver = mock.Mock() - - @mock.patch.object(lb_policy.LoadBalancingPolicy, 'validate') - def test_init(self, mock_validate): - policy = 
lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.loadbalance-1.3', policy.type) - self.assertEqual(self.spec['properties']['pool'], policy.pool_spec) - self.assertEqual(self.spec['properties']['vip'], policy.vip_spec) - self.assertIsNone(policy.lb) - - def test_init_with_default_value_subnet_only(self): - spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'pool': {'subnet': 'internal-subnet'}, - 'vip': {'subnet': 'external-subnet'} - } - } - default_spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'pool': { - 'id': None, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'subnet': 'internal-subnet', - 'lb_method': 'ROUND_ROBIN', - 'admin_state_up': True, - 'session_persistence': {}, - }, - 'vip': { - 'address': None, - 'subnet': 'external-subnet', - 'network': None, - 'connection_limit': -1, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'admin_state_up': True, - }, - 'lb_status_timeout': 300 - } - } - - policy = lb_policy.LoadBalancingPolicy('test-policy', spec) - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.loadbalance-1.3', policy.type) - self.assertEqual(default_spec['properties']['pool'], policy.pool_spec) - self.assertEqual(default_spec['properties']['vip'], policy.vip_spec) - self.assertEqual(default_spec['properties']['lb_status_timeout'], - policy.lb_status_timeout) - self.assertIsNone(policy.lb) - - def test_init_with_default_value_network_only(self): - spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'pool': {'subnet': 'internal-subnet'}, - 'vip': {'network': 'external-network'} - } - } - default_spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'pool': { - 'id': None, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'subnet': 'internal-subnet', - 'lb_method': 'ROUND_ROBIN', - 'admin_state_up': True, - 'session_persistence': {}, - }, - 'vip': { - 'address': None, - 'subnet': None, - 'network': 'external-network', - 'connection_limit': -1, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'admin_state_up': True, - }, - 'lb_status_timeout': 300 - } - } - - policy = lb_policy.LoadBalancingPolicy('test-policy', spec) - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.loadbalance-1.3', policy.type) - self.assertEqual(default_spec['properties']['pool'], policy.pool_spec) - self.assertEqual(default_spec['properties']['vip'], policy.vip_spec) - self.assertEqual(default_spec['properties']['lb_status_timeout'], - policy.lb_status_timeout) - self.assertIsNone(policy.lb) - - def test_init_with_default_value_subnet_and_network(self): - spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'pool': {'subnet': 'internal-subnet'}, - 'vip': {'subnet': 'external-subnet', - 'network': 'external-network'} - } - } - default_spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'pool': { - 'id': None, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'subnet': 'internal-subnet', - 'lb_method': 'ROUND_ROBIN', - 'admin_state_up': True, - 'session_persistence': {}, - }, - 'vip': { - 'address': None, - 'subnet': 'external-subnet', - 'network': 'external-network', - 'connection_limit': -1, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'admin_state_up': 
True, - }, - 'lb_status_timeout': 300 - } - } - - policy = lb_policy.LoadBalancingPolicy('test-policy', spec) - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.loadbalance-1.3', policy.type) - self.assertEqual(default_spec['properties']['pool'], policy.pool_spec) - self.assertEqual(default_spec['properties']['vip'], policy.vip_spec) - self.assertEqual(default_spec['properties']['lb_status_timeout'], - policy.lb_status_timeout) - self.assertIsNone(policy.lb) - - def test_loadbalancer_value(self): - spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'loadbalancer': 'LB_ID', - 'pool': { - 'id': 'POOL_ID', - 'subnet': 'internal-subnet' - }, - 'vip': { - 'address': '192.168.1.100', - 'subnet': 'external-subnet', - 'network': 'external-network', - }, - 'health_monitor': { - 'id': 'HM_ID' - } - } - } - self.spec['properties']['pool']['id'] = 'POOL_ID' - self.spec['properties']['health_monitor']['id'] = 'HM_ID' - self.spec['properties']['loadbalancer'] = 'LB_ID' - self.spec['properties']['pool']['session_persistence'] = {} - self.spec['properties']['vip']['connection_limit'] = -1 - policy = lb_policy.LoadBalancingPolicy('test-policy', spec) - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.loadbalance-1.3', policy.type) - self.assertEqual(self.spec['properties']['pool'], policy.pool_spec) - self.assertEqual(self.spec['properties']['vip'], policy.vip_spec) - self.assertEqual(self.spec['properties']['loadbalancer'], policy.lb) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_shallow(self, mock_validate): - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - ctx = mock.Mock() - - res = policy.validate(ctx, False) - - self.assertTrue(res) - mock_validate.assert_called_with(ctx, False) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_pool_subnet_notfound(self, mock_validate): - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._networkclient = self.net_driver - policy._octaviaclient = self.octavia_driver - ctx = mock.Mock(user='user1', project='project1') - self.net_driver.subnet_get = mock.Mock( - side_effect=exc.InternalError(code='404', message='not found')) - - ex = self.assertRaises(exc.InvalidSpec, policy.validate, ctx, True) - - mock_validate.assert_called_with(ctx, True) - self.net_driver.subnet_get.assert_called_once_with('internal-subnet') - self.assertEqual("The specified subnet 'internal-subnet' could not " - "be found.", str(ex)) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_vip_subnet_notfound(self, mock_validate): - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._networkclient = self.net_driver - policy._octaviaclient = self.octavia_driver - ctx = mock.Mock(user='user1', project='project1') - self.net_driver.subnet_get = mock.Mock( - side_effect=[ - mock.Mock(), # for the internal (pool) one - exc.InternalError(code='404', message='not found') - ] - ) - - ex = self.assertRaises(exc.InvalidSpec, policy.validate, ctx, True) - - mock_validate.assert_called_with(ctx, True) - self.net_driver.subnet_get.assert_has_calls([ - mock.call('internal-subnet'), mock.call('external-subnet') - ]) - self.assertEqual("The specified subnet 'external-subnet' could not " - "be found.", str(ex)) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_vip_network_notfound(self, 
mock_validate): - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._networkclient = self.net_driver - policy._octaviaclient = self.octavia_driver - ctx = mock.Mock(user='user1', project='project1') - self.net_driver.network_get = mock.Mock( - side_effect=[ - exc.InternalError(code='404', message='not found') - ] - ) - - ex = self.assertRaises(exc.InvalidSpec, policy.validate, ctx, True) - - mock_validate.assert_called_with(ctx, True) - self.net_driver.network_get.assert_called_with('external-network') - self.assertEqual("The specified network 'external-network' could not " - "be found.", str(ex)) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_vip_no_subnet_or_network_provided(self, mock_validate): - spec = copy.deepcopy(self.spec) - del spec['properties']['vip']['subnet'] - del spec['properties']['vip']['network'] - policy = lb_policy.LoadBalancingPolicy('test-policy', spec) - policy._networkclient = self.net_driver - policy._octaviaclient = self.octavia_driver - ctx = mock.Mock(user='user1', project='project1') - - ex = self.assertRaises(exc.InvalidSpec, policy.validate, ctx, True) - - mock_validate.assert_called_with(ctx, True) - self.assertEqual("At least one of VIP Subnet or Network must be " - "defined.", str(ex)) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_loadbalancer_notfound(self, mock_validate): - self.spec['properties']['loadbalancer'] = "LB_ID" - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._networkclient = self.net_driver - policy._octaviaclient = self.octavia_driver - ctx = mock.Mock(user='user1', project='project1') - self.octavia_driver.loadbalancer_get = mock.Mock( - side_effect=exc.InternalError(code='404', message='not found')) - - ex = self.assertRaises(exc.InvalidSpec, policy.validate, ctx, True) - - mock_validate.assert_called_with(ctx, True) - self.octavia_driver.loadbalancer_get.assert_called_once_with('LB_ID') - self.assertEqual("The specified loadbalancer 'LB_ID' could not " - "be found.", str(ex)) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_build_policy_data') - @mock.patch.object(policy_base.Policy, 'attach') - @mock.patch.object(no.Node, 'update') - def test_attach_succeeded(self, m_update, m_attach, m_build): - cluster = mock.Mock(id='CLUSTER_ID', data={}) - node1 = mock.Mock(id='fake1', data={}) - node2 = mock.Mock(id='fake2', data={}) - cluster.nodes = [node1, node2] - m_attach.return_value = (True, None) - m_build.return_value = 'policy_data' - data = { - 'loadbalancer': 'LB_ID', - 'vip_address': '192.168.1.100', - 'pool': 'POOL_ID' - } - self.lb_driver.lb_create.return_value = (True, data) - self.lb_driver.member_add.side_effect = ['MEMBER1_ID', 'MEMBER2_ID'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy.id = 'FAKE_ID' - policy._lbaasclient = self.lb_driver - - res, data = policy.attach(cluster) - - cluster_name = cluster.name - - self.assertTrue(res) - self.assertEqual('policy_data', data) - self.lb_driver.lb_create.assert_called_once_with(policy.vip_spec, - policy.pool_spec, - cluster_name, - policy.hm_spec, - policy.az_spec, - policy.flavor_id_spec) - member_add_calls = [ - mock.call(node1, 'LB_ID', 'POOL_ID', 80, 'internal-subnet'), - mock.call(node2, 'LB_ID', 'POOL_ID', 80, 'internal-subnet') - ] - self.lb_driver.member_add.assert_has_calls(member_add_calls) - node_update_calls = [ - mock.call(mock.ANY, node1.id, - {'data': {'lb_member': 'MEMBER1_ID'}}), - mock.call(mock.ANY, node2.id, - {'data': 
{'lb_member': 'MEMBER2_ID'}}) - ] - m_update.assert_has_calls(node_update_calls) - expected = { - policy.id: {'vip_address': '192.168.1.100'} - } - self.assertEqual(expected, cluster.data['loadbalancers']) - - @mock.patch.object(policy_base.Policy, 'attach') - def test_attach_failed_base_return_false(self, mock_attach): - cluster = mock.Mock() - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - mock_attach.return_value = (False, 'data') - - res, data = policy.attach(cluster) - - self.assertFalse(res) - self.assertEqual('data', data) - - @mock.patch.object(policy_base.Policy, 'attach') - def test_attach_failed_lb_creation_error(self, m_attach): - cluster = mock.Mock() - m_attach.return_value = (True, None) - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - # lb_driver.lb_create returns False - self.lb_driver.lb_create.return_value = (False, 'error') - res = policy.attach(cluster) - self.assertEqual((False, 'error'), res) - - @mock.patch.object(policy_base.Policy, 'attach') - def test_attach_failed_member_add(self, mock_attach): - cluster = mock.Mock() - cluster.nodes = [mock.Mock(id='fake1'), mock.Mock(id='fake2')] - mock_attach.return_value = (True, None) - lb_data = { - 'loadbalancer': 'LB_ID', - 'vip_address': '192.168.1.100', - 'pool': 'POOL_ID' - } - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - # lb_driver.member_add returns None - self.lb_driver.lb_create.return_value = (True, lb_data) - self.lb_driver.member_add.return_value = None - - res = policy.attach(cluster) - - self.assertEqual((False, 'Failed in adding node into lb pool'), res) - self.lb_driver.lb_delete.assert_called_once_with(**lb_data) - - def test_post_candidates_node_recover_reboot(self): - node = mock.Mock(id='NODE1_ID') - action = mock.Mock(action=consts.NODE_RECOVER) - action.entity = node - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - candidates = policy._get_post_candidates(action) - - self.assertEqual(['NODE1_ID'], candidates) - - def test_post_candidates_node_recover_empty(self): - node = mock.Mock(id='NODE1_ID') - action = mock.Mock(action=consts.NODE_RECOVER, - outputs={}) - action.entity = node - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - candidates = policy._get_post_candidates(action) - - self.assertEqual(['NODE1_ID'], candidates) - - def test_post_candidates_cluster_resize(self): - action = mock.Mock(action=consts.CLUSTER_RESIZE, - data={ - 'creation': { - 'nodes': ['NODE1_ID', 'NODE2_ID'] - } - }) - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - candidates = policy._get_post_candidates(action) - - self.assertEqual(['NODE1_ID', 'NODE2_ID'], candidates) - - def test_get_delete_candidates_for_node_delete(self): - action = mock.Mock(action=consts.NODE_DELETE, inputs={}, data={}, - entity=mock.Mock(id='NODE_ID')) - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - res = policy._get_delete_candidates('CLUSTERID', action) - - self.assertEqual(['NODE_ID'], res) - - def test_get_delete_candidates_no_deletion_data_del_nodes(self): - action = mock.Mock(action=consts.CLUSTER_DEL_NODES, data={}, - inputs={'candidates': ['node1', 'node2']}) - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - res = policy._get_delete_candidates('CLUSTERID', action) - - self.assertEqual(['node1', 'node2'], res) - - @mock.patch.object(scaleutils, 'nodes_by_random') - def 
test_get_delete_candidates_no_deletion_data_scale_in(self, - m_nodes_random): - self.context = utils.dummy_context() - node1 = mock.Mock(id='node1') - node2 = mock.Mock(id='node2') - node3 = mock.Mock(id='node3') - cluster = mock.Mock(nodes=[node1, node2, node3]) - action = mock.Mock( - action=consts.CLUSTER_SCALE_IN, - entity=cluster, - data={}, - inputs={'count': 1} - ) - - m_nodes_random.return_value = ['node3'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy._get_delete_candidates('CLUSTER_ID', action) - - m_nodes_random.assert_called_once_with([node1, node2, node3], 1) - self.assertEqual(['node3'], res) - - @mock.patch.object(scaleutils, 'parse_resize_params') - @mock.patch.object(scaleutils, 'nodes_by_random') - def test_get_delete_candidates_no_deletion_data_resize( - self, m_nodes_random, m_parse_param): - - def _parse_param(action, cluster, current): - action.data = {'deletion': {'count': 2}} - - self.context = utils.dummy_context() - node1 = mock.Mock(id='node1') - node2 = mock.Mock(id='node2') - node3 = mock.Mock(id='node3') - cluster = mock.Mock(id='cluster1') - cluster.nodes = [node1, node2, node3] - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - action.entity = cluster - - m_parse_param.side_effect = _parse_param - m_nodes_random.return_value = ['node1', 'node3'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - res = policy._get_delete_candidates('CLUSTERID', action) - - m_parse_param.assert_called_once_with(action, cluster, 3) - m_nodes_random.assert_called_once_with([node1, node2, node3], 2) - self.assertEqual(['node1', 'node3'], res) - - @mock.patch.object(scaleutils, 'nodes_by_random') - def test_get_delete_candidates_deletion_no_candidates(self, - m_nodes_random): - self.context = utils.dummy_context() - node1 = mock.Mock(id='node1') - node2 = mock.Mock(id='node2') - node3 = mock.Mock(id='node3') - cluster = mock.Mock(id='cluster1') - cluster.nodes = [node1, node2, node3] - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - action.entity = cluster - action.data = {'deletion': {'count': 1}} - - m_nodes_random.return_value = ['node2'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - res = policy._get_delete_candidates('CLUSTERID', action) - - m_nodes_random.assert_called_once_with([node1, node2, node3], 1) - - self.assertEqual(['node2'], res) - self.assertEqual({'deletion': {'count': 1, 'candidates': ['node2']}}, - action.data) - - def test_get_delete_candidates_deletion_count_is_zero(self): - self.context = utils.dummy_context() - action = mock.Mock(data={'deletion': {'number': 3}}) - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - res = policy._get_delete_candidates('CLUSTERID', action) - - self.assertEqual([], res) - - @mock.patch.object(scaleutils, 'nodes_by_random') - def test_get_delete_candidates_deletion_count_over_size(self, - m_nodes_random): - node1 = mock.Mock(id='node1') - node2 = mock.Mock(id='node2') - node3 = mock.Mock(id='node3') - cluster = mock.Mock(id='cluster1') - cluster.nodes = [node1, node2, node3] - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - action.entity = cluster - action.data = {'deletion': {'count': 4}} - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - policy._get_delete_candidates('CLUSTERID', action) - - m_nodes_random.assert_called_once_with([node1, node2, node3], 3) - - def test_get_delete_candidates_deletion_with_candidates(self): - action = mock.Mock() - action.data = 
{'deletion': {'count': 1, 'candidates': ['node3']}} - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - res = policy._get_delete_candidates('CLUSTERID', action) - self.assertEqual(['node3'], res) - - @mock.patch.object(scaleutils, 'nodes_by_random') - def test_get_delete_candidates_no_deletion_data_count_gt_one_scale_in( - self, m_nodes_random - ): - self.context = utils.dummy_context() - node1 = mock.Mock(id='node1') - node2 = mock.Mock(id='node2') - node3 = mock.Mock(id='node3') - cluster = mock.Mock(nodes=[node1, node2, node3]) - action = mock.Mock( - action=consts.CLUSTER_SCALE_IN, - entity=cluster, - data={}, - inputs={'count': 2} - ) - - m_nodes_random.return_value = ['node1', 'node3'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy._get_delete_candidates('CLUSTER_ID', action) - - m_nodes_random.assert_called_once_with([node1, node2, node3], 2) - self.assertEqual(['node1', 'node3'], res) - - -@mock.patch.object(cluster_policy.ClusterPolicy, 'load') -@mock.patch.object(lb_policy.LoadBalancingPolicy, '_extract_policy_data') -class TestLoadBalancingPolicyOperations(base.SenlinTestCase): - - def setUp(self): - super(TestLoadBalancingPolicyOperations, self).setUp() - - self.context = utils.dummy_context() - self.spec = { - 'type': 'senlin.policy.loadbalance', - 'version': '1.3', - 'properties': { - 'pool': { - 'protocol': 'HTTP', - 'protocol_port': 80, - 'subnet': 'test-subnet', - 'lb_method': 'ROUND_ROBIN', - 'admin_state_up': True, - 'session_persistence': { - 'type': 'SOURCE_IP', - 'cookie_name': 'whatever' - } - }, - 'vip': { - 'address': '192.168.1.100', - 'subnet': 'test-subnet', - 'network': 'test-network', - 'connection_limit': 500, - 'protocol': 'HTTP', - 'protocol_port': 80, - 'admin_state_up': True, - }, - 'health_monitor': { - 'type': 'HTTP', - 'delay': '1', - 'timeout': 1, - 'max_retries': 5, - 'admin_state_up': True, - 'http_method': 'GET', - 'url_path': '/index.html', - 'expected_codes': '200,201,202' - }, - 'availability_zone': 'test_az', - } - } - self.lb_driver = mock.Mock() - self.patchobject(oslo_context, 'get_current') - - def test_detach_no_policy_data(self, m_extract, m_load): - cluster = mock.Mock() - m_extract.return_value = None - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - res, data = policy.detach(cluster) - - self.assertTrue(res) - self.assertEqual('LB resources deletion succeeded.', data) - - def test_detach_succeeded(self, m_extract, m_load): - cp = mock.Mock() - policy_data = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - cp_data = { - 'LoadBalancingPolicy': { - 'version': '1.0', - 'data': policy_data - } - } - cp.data = cp_data - m_load.return_value = cp - m_extract.return_value = policy_data - self.lb_driver.lb_delete.return_value = (True, 'lb_delete succeeded.') - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - cluster = mock.Mock( - id='CLUSTER_ID', - data={ - 'loadbalancers': { - policy.id: {'vip_address': '192.168.1.100'} - - } - }) - node = mock.Mock(id='fake', data={}) - cluster.nodes = [node] - - res, data = policy.detach(cluster) - - self.assertTrue(res) - self.assertEqual('lb_delete succeeded.', data) - m_load.assert_called_once_with(mock.ANY, cluster.id, policy.id) - m_extract.assert_called_once_with(cp_data) - self.lb_driver.lb_delete.assert_called_once_with(**policy_data) - self.assertEqual({}, 
cluster.data) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - def test_detach_existed_lbass_succeeded(self, m_remove, m_extract, - m_load): - cp = mock.Mock() - policy_data = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID', - 'preexisting': True, - } - cp_data = { - 'LoadBalancingPolicy': { - 'version': '1.0', - 'data': policy_data - } - } - cp.data = cp_data - m_load.return_value = cp - m_extract.return_value = policy_data - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - cluster = mock.Mock( - id='CLUSTER_ID', - data={ - 'loadbalancers': { - policy.id: {'vip_address': '192.168.1.100'} - - } - }) - node = mock.Mock(id='fake', data={}) - cluster.nodes = [node] - m_remove.return_value = [] - - res, data = policy.detach(cluster) - - self.assertTrue(res) - self.assertEqual('LB resources deletion succeeded.', data) - m_load.assert_called_once_with(mock.ANY, cluster.id, policy.id) - m_extract.assert_called_once_with(cp_data) - m_remove.assert_called_with(mock.ANY, ['fake'], cp, self.lb_driver) - self.assertEqual({}, cluster.data) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - def test_detach_existed_lbass_failed(self, m_remove, m_extract, m_load): - cp = mock.Mock() - policy_data = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID', - 'preexisting': True, - } - cp_data = { - 'LoadBalancingPolicy': { - 'version': '1.0', - 'data': policy_data - } - } - cp.data = cp_data - m_load.return_value = cp - m_extract.return_value = policy_data - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - cluster = mock.Mock( - id='CLUSTER_ID', - data={ - 'loadbalancers': { - policy.id: {'vip_address': '192.168.1.100'} - - } - }) - node1 = mock.Mock(id='node1', data={}) - node2 = mock.Mock(id='node2', data={}) - cluster.nodes = [node1, node2] - m_remove.return_value = [node2.id] - - res, data = policy.detach(cluster) - - self.assertFalse(res) - self.assertEqual('Failed to remove servers from existed LB.', data) - m_load.assert_called_once_with(mock.ANY, cluster.id, policy.id) - m_extract.assert_called_once_with(cp_data) - m_remove.assert_called_with(mock.ANY, ['node1', 'node2'], cp, - self.lb_driver) - self.assertEqual({ - 'loadbalancers': { - None: {'vip_address': '192.168.1.100'} - }}, - cluster.data) - - def test_detach_failed_lb_delete(self, m_extract, m_load): - cluster = mock.Mock() - policy_data = { - 'preexisting': False, - } - m_extract.return_value = policy_data - self.lb_driver.lb_delete.return_value = (False, 'lb_delete failed.') - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - res, data = policy.detach(cluster) - - self.assertFalse(res) - self.assertEqual('lb_delete failed.', data) - - def test_post_op_no_nodes(self, m_extract, m_load): - action = mock.Mock(data={}) - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - - res = policy.post_op('FAKE_ID', action) - - self.assertIsNone(res) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'update') - def test_add_member(self, m_node_update, m_node_get, - m_extract, m_load): - node1 = mock.Mock(id='NODE1_ID', data={}) - node2 = mock.Mock(id='NODE2_ID', data={}) - action = mock.Mock(context='action_context', - action=consts.CLUSTER_RESIZE, - data={ - 'creation': { - 'nodes': 
['NODE1_ID', 'NODE2_ID'] - } - }) - cp = mock.Mock() - policy_data = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - cp_data = { - 'LoadBalancingPolicy': { - 'version': '1.0', - 'data': policy_data - } - } - cp.data = cp_data - self.lb_driver.member_add.side_effect = ['MEMBER1_ID', 'MEMBER2_ID'] - m_node_get.side_effect = [node1, node2] - m_extract.return_value = policy_data - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - # do it - candidates = ['NODE1_ID', 'NODE2_ID'] - res = policy._add_member(action.context, candidates, - cp, self.lb_driver) - - # assertions - self.assertEqual([], res) - m_extract.assert_called_once_with(cp_data) - calls_node_get = [ - mock.call('action_context', node_id='NODE1_ID'), - mock.call('action_context', node_id='NODE2_ID') - ] - m_node_get.assert_has_calls(calls_node_get) - calls_node_update = [ - mock.call(action.context, 'NODE1_ID', mock.ANY), - mock.call(action.context, 'NODE2_ID', mock.ANY) - ] - m_node_update.assert_has_calls(calls_node_update) - calls_member_add = [ - mock.call(node1, 'LB_ID', 'POOL_ID', 80, 'test-subnet'), - mock.call(node2, 'LB_ID', 'POOL_ID', 80, 'test-subnet'), - ] - self.lb_driver.member_add.assert_has_calls(calls_member_add) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'update') - def test_add_member_fail(self, m_node_update, m_node_get, - m_extract, m_load): - node1 = mock.Mock(id='NODE1_ID', data={}) - action = mock.Mock(context='action_context', - action=consts.CLUSTER_RESIZE, - data={ - 'creation': { - 'nodes': ['NODE1_ID'] - } - }) - cp = mock.Mock() - policy_data = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - cp_data = { - 'LoadBalancingPolicy': { - 'version': '1.0', - 'data': policy_data - } - } - cp.data = cp_data - self.lb_driver.member_add.return_value = None - m_node_get.return_value = node1 - m_extract.return_value = policy_data - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - # do it - candidates = ['NODE1_ID'] - res = policy._add_member(action.context, candidates, - cp, self.lb_driver) - - # assertions - self.assertEqual(['NODE1_ID'], res) - m_extract.assert_called_once_with(cp_data) - m_node_get.assert_called_once_with( - 'action_context', node_id='NODE1_ID') - m_node_update.assert_called_once_with( - 'action_context', 'NODE1_ID', mock.ANY) - self.lb_driver.member_add.assert_called_once_with( - node1, 'LB_ID', 'POOL_ID', 80, 'test-subnet') - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_add_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_get_post_candidates') - def test_post_op_node_create(self, m_get, m_remove, m_add, - m_candidates, m_load): - ctx = mock.Mock() - cid = 'CLUSTER_ID' - cluster = mock.Mock(user='user1', project='project1') - action = mock.Mock(data={}, context=ctx, action=consts.NODE_CREATE, - node=mock.Mock(id='NODE_ID'), - inputs={'action_result': 'OK'}) - action.entity = cluster - cp = mock.Mock() - m_load.return_value = cp - m_add.return_value = [] - m_get.return_value = ['NODE_ID'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - # do it - res = policy.post_op(cid, action) - - # assertion - self.assertIsNone(res) - m_get.assert_called_once_with(action) - 
m_load.assert_called_once_with(ctx, cid, policy.id) - m_add.assert_called_once_with(ctx, ['NODE_ID'], cp, self.lb_driver) - self.assertFalse(m_remove.called) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_add_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_get_delete_candidates') - @mock.patch.object(co.Cluster, 'get') - def test_pre_op_node_replace( - self, m_cluster_get, m_get, m_remove, m_add, m_candidates, m_load - ): - ctx = mock.Mock() - cluster_id = 'CLUSTER_ID' - cluster = mock.Mock( - user='user1', - project='project1', - desired_capacity=5, - min_size=0 - ) - action = mock.Mock( - data={}, - context=ctx, - action=consts.CLUSTER_REPLACE_NODES, - inputs={'candidates': {'OLD_NODE_ID': 'NEW_NODE_ID'}}, - entity=cluster - ) - - m_cluster_get.return_value = cluster - cp = mock.Mock() - m_load.return_value = cp - m_add.return_value = [] - m_get.return_value = ['OLD_NODE_ID'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - res = policy.pre_op(cluster_id, action) - - # assertion - self.assertIsNone(res) - m_get.assert_called_once_with(cluster_id, action) - m_load.assert_called_once_with(ctx, cluster_id, policy.id) - m_remove.assert_called_once_with( - ctx, ['OLD_NODE_ID'], cp, self.lb_driver - ) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_add_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_get_post_candidates') - def test_post_op_node_replace(self, m_get, m_remove, m_add, - m_candidates, m_load): - ctx = mock.Mock() - cid = 'CLUSTER_ID' - cluster = mock.Mock(user='user1', project='project1') - action = mock.Mock(data={}, context=ctx, - action=consts.CLUSTER_REPLACE_NODES, - inputs={'candidates': { - 'OLD_NODE_ID': 'NEW_NODE_ID'}}) - action.entity = cluster - cp = mock.Mock() - m_load.return_value = cp - m_add.return_value = [] - m_get.return_value = ['NEW_NODE_ID'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - # do it - res = policy.post_op(cid, action) - - # assertion - self.assertIsNone(res) - m_get.assert_called_once_with(action) - m_load.assert_called_once_with(ctx, cid, policy.id) - m_add.assert_called_once_with(ctx, ['NEW_NODE_ID'], cp, self.lb_driver) - self.assertFalse(m_remove.called) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_add_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_get_post_candidates') - def test_post_op_add_nodes(self, m_get, m_remove, m_add, - m_candidates, m_load): - cid = 'CLUSTER_ID' - cluster = mock.Mock(user='user1', project='project1') - action = mock.Mock(context='action_context', - action=consts.CLUSTER_RESIZE, - data={ - 'creation': { - 'nodes': ['NODE1_ID', 'NODE2_ID'] - } - }, - inputs={'action_result': 'OK'}) - action.entity = cluster - candidates = ['NODE1_ID', 'NODE2_ID'] - m_get.return_value = candidates - cp = mock.Mock() - m_load.return_value = cp - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - # do it - res = policy.post_op(cid, action) - - # assertions - self.assertIsNone(res) - m_get.assert_called_once_with(action) - m_load.assert_called_once_with('action_context', cid, policy.id) - m_add.assert_called_once_with(action.context, candidates, - cp, self.lb_driver) - 
self.assertFalse(m_remove.called) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_add_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_process_recovery') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_get_post_candidates') - def test_post_op_node_recover(self, m_get, m_recovery, m_add, - m_candidates, m_load): - cid = 'CLUSTER_ID' - node = mock.Mock(user='user1', project='project1', id='NODE1') - action = mock.Mock(context='action_context', - action=consts.NODE_RECOVER, - data={}, - outputs={}, - inputs={'action_result': 'OK'}) - action.entity = node - m_recovery.return_value = ['NODE1'] - m_get.return_value = ['NODE1'] - cp = mock.Mock() - m_load.return_value = cp - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - # do it - res = policy.post_op(cid, action) - - # assertions - self.assertIsNone(res) - m_get.assert_called_once_with(action) - m_load.assert_called_once_with('action_context', cid, policy.id) - m_add.assert_called_once_with(action.context, ['NODE1'], - cp, self.lb_driver) - m_recovery.assert_called_once_with(['NODE1'], cp, self.lb_driver, - action) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_add_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_get_post_candidates') - def test_post_op_clusterresize_failed(self, m_get, m_remove, m_add, - m_candidates, m_load): - cluster_id = 'CLUSTER_ID' - action = mock.Mock(data={'creation': {'nodes': ['NODE1_ID']}}, - context='action_context', - action=consts.CLUSTER_RESIZE, - inputs={'action_result': 'OK'}) - - cp = mock.Mock() - m_load.return_value = cp - m_get.return_value = ['NODE1_ID'] - m_add.return_value = ['NODE1_ID'] - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - res = policy.post_op(cluster_id, action) - - self.assertIsNone(res) - self.assertEqual(policy_base.CHECK_ERROR, action.data['status']) - self.assertEqual("Failed in adding nodes into lb pool: " - "['NODE1_ID']", action.data['reason']) - m_get.assert_called_once_with(action) - m_add.assert_called_once_with(action.context, ['NODE1_ID'], - cp, self.lb_driver) - self.assertFalse(m_remove.called) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'update') - def test_remove_member(self, m_node_update, m_node_get, - m_extract, m_load): - node1 = mock.Mock(id='NODE1', data={'lb_member': 'MEM_ID1'}) - node2 = mock.Mock(id='NODE2', data={'lb_member': 'MEM_ID2'}) - action = mock.Mock( - context='action_context', action=consts.CLUSTER_DEL_NODES, - data={ - 'deletion': { - 'count': 2, - 'candidates': ['NODE1', 'NODE2'] - } - }) - cp = mock.Mock() - policy_data = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - cp_data = { - 'LoadBalancingPolicy': { - 'version': '1.0', 'data': policy_data - } - } - cp.data = cp_data - self.lb_driver.member_remove.return_value = True - m_node_get.side_effect = [node1, node2] - m_extract.return_value = policy_data - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - candidates = [node1.id, node2.id] - res = policy._remove_member(action.context, candidates, - cp, self.lb_driver) - - m_extract.assert_called_once_with(cp_data) - calls_node_get = [ - mock.call(action.context, node_id='NODE1'), - mock.call(action.context, node_id='NODE2') - ] - 
m_node_get.assert_has_calls(calls_node_get) - calls_node_update = [ - mock.call(action.context, 'NODE1', mock.ANY), - mock.call(action.context, 'NODE2', mock.ANY) - ] - m_node_update.assert_has_calls(calls_node_update) - calls_member_del = [ - mock.call('LB_ID', 'POOL_ID', 'MEM_ID1'), - mock.call('LB_ID', 'POOL_ID', 'MEM_ID2') - ] - self.lb_driver.member_remove.assert_has_calls(calls_member_del) - self.assertEqual([], res) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'update') - def test_remove_member_not_in_pool(self, m_node_update, m_node_get, - m_extract, m_load): - node1 = mock.Mock(id='NODE1', data={'lb_member': 'MEM_ID1'}) - node2 = mock.Mock(id='NODE2', data={}) - action = mock.Mock( - context='action_context', action=consts.CLUSTER_DEL_NODES, - data={ - 'deletion': { - 'count': 2, - 'candidates': ['NODE1', 'NODE2'] - } - }) - cp = mock.Mock() - policy_data = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - cp_data = { - 'LoadBalancingPolicy': { - 'version': '1.0', 'data': policy_data - } - } - cp.data = cp_data - self.lb_driver.member_remove.return_value = True - m_node_get.side_effect = [node1, node2] - m_extract.return_value = policy_data - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - candidates = [node1.id, node2.id] - res = policy._remove_member(action.context, candidates, - cp, self.lb_driver) - - m_extract.assert_called_once_with(cp_data) - calls_node_get = [ - mock.call(action.context, node_id='NODE1'), - mock.call(action.context, node_id='NODE2') - ] - m_node_get.assert_has_calls(calls_node_get) - m_node_update.assert_called_once_with( - action.context, 'NODE1', mock.ANY) - self.lb_driver.member_remove.assert_called_once_with( - 'LB_ID', 'POOL_ID', 'MEM_ID1') - self.assertEqual([], res) - - @mock.patch.object(no.Node, 'get') - @mock.patch.object(no.Node, 'update') - def test_remove_member_fail(self, m_node_update, m_node_get, - m_extract, m_load): - node1 = mock.Mock(id='NODE1', data={'lb_member': 'MEM_ID1'}) - action = mock.Mock( - context='action_context', action=consts.CLUSTER_DEL_NODES, - data={ - 'deletion': { - 'count': 1, - 'candidates': ['NODE1'] - } - }) - cp = mock.Mock() - policy_data = { - 'loadbalancer': 'LB_ID', - 'listener': 'LISTENER_ID', - 'pool': 'POOL_ID', - 'healthmonitor': 'HM_ID' - } - cp_data = { - 'LoadBalancingPolicy': { - 'version': '1.0', 'data': policy_data - } - } - cp.data = cp_data - self.lb_driver.member_remove.return_value = False - m_node_get.return_value = node1 - m_extract.return_value = policy_data - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - - candidates = [node1.id] - res = policy._remove_member(action.context, candidates, - cp, self.lb_driver) - - m_extract.assert_called_once_with(cp_data) - m_node_get.assert_called_once_with(action.context, node_id='NODE1') - m_node_update.assert_called_once_with( - action.context, 'NODE1', mock.ANY) - self.lb_driver.member_remove.assert_called_once_with( - 'LB_ID', 'POOL_ID', 'MEM_ID1') - self.assertEqual(['NODE1'], res) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - @mock.patch.object(co.Cluster, 'get') - def test_pre_op_del_nodes_ok( - self, m_cluster_get, m_remove, m_candidates, m_load - ): - cluster_id = 'CLUSTER_ID' - cluster = mock.Mock( - user='user1', - project='project1', - desired_capacity=5, - min_size=0 - ) - action = mock.Mock( - 
context='action_context', - action=consts.CLUSTER_DEL_NODES, - entity=cluster, - data={ - 'deletion': { - 'count': 2, - 'candidates': ['NODE1_ID', 'NODE2_ID'] - } - } - ) - - m_cluster_get.return_value = cluster - m_candidates.return_value = ['NODE1_ID', 'NODE2_ID'] - m_load.return_value = mock.Mock() - m_remove.return_value = [] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - res = policy.pre_op(cluster_id, action) - - self.assertIsNone(res) - m_load.assert_called_once_with('action_context', cluster_id, policy.id) - expected_data = { - 'deletion': { - 'candidates': ['NODE1_ID', 'NODE2_ID'], 'count': 2 - } - } - self.assertEqual(expected_data, action.data) - - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - @mock.patch.object(co.Cluster, 'get') - def test_pre_op_del_nodes_failed( - self, m_cluster_get, m_remove, *args, **kwargs - ): - cluster_id = 'CLUSTER_ID' - cluster = mock.Mock( - user='user1', - project='project1', - desired_capacity=5, - min_size=0 - ) - action = mock.Mock( - action=consts.CLUSTER_RESIZE, - context='action_context', - data={'deletion': {'candidates': ['NODE1_ID']}}, - entity=cluster - ) - - m_cluster_get.return_value = cluster - m_remove.return_value = ['NODE1_ID'] - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - policy._lbaasclient = self.lb_driver - res = policy.pre_op(cluster_id, action) - - self.assertIsNone(res) - self.assertEqual(policy_base.CHECK_ERROR, action.data['status']) - self.assertEqual( - "Failed in removing deleted node(s) from lb pool: ['NODE1_ID']", - action.data['reason'] - ) - - m_remove.assert_called_once_with(action.context, ['NODE1_ID'], - mock.ANY, self.lb_driver) - - @mock.patch.object(no.Node, 'update') - def test_process_recovery_not_lb_member(self, m_update, m1, m2): - node = mock.Mock(id='NODE', data={}) - action = mock.Mock( - action=consts.NODE_RECOVER, - context='action_context') - action.entity = node - - cp = mock.Mock() - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy._process_recovery(['NODE'], cp, self.lb_driver, action) - - self.assertEqual(['NODE'], res) - m_update.assert_called_once_with(action.context, 'NODE', {'data': {}}) - - @mock.patch.object(no.Node, 'update') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - def test_process_recovery_reboot(self, m_remove, m_update, m1, m2): - node = mock.Mock(id='NODE', data={'lb_member': 'mem_1'}) - action = mock.Mock( - action=consts.NODE_RECOVER, - context='action_context') - action.entity = node - - cp = mock.Mock() - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy._process_recovery(['NODE'], cp, self.lb_driver, action) - - self.assertIsNone(res) - - self.assertFalse(m_remove.called) - self.assertFalse(m_update.called) - - @mock.patch.object(no.Node, 'update') - @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - def test_process_recovery_recreate(self, m_remove, m_update, m1, m2): - node = mock.Mock(id='NODE', data={'lb_member': 'mem_1', - 'recovery': 'RECREATE'}) - action = mock.Mock( - action=consts.NODE_RECOVER, - context='action_context') - action.entity = node - - cp = mock.Mock() - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy._process_recovery(['NODE'], cp, self.lb_driver, action) - - self.assertEqual(['NODE'], res) - m_remove.assert_called_once_with(action.context, ['NODE'], cp, - self.lb_driver, handle_err=False) - 
m_update.assert_called_once_with(action.context, 'NODE', {'data': {}}) - - @mock.patch.object( - lb_policy.LoadBalancingPolicy, '_get_delete_candidates' - ) - @mock.patch.object(co.Cluster, 'get') - def test_pre_op_cluster_del_nodes_at_min_threshold( - self, m_cluster_get, m_candidates, *args, **kwargs - ): - cluster = mock.Mock( - user='user1', - project='project1', - desired_capacity=1, - min_size=1 - ) - action = mock.Mock( - action=consts.CLUSTER_DEL_NODES, - entity=cluster, - context='action_context' - ) - - m_cluster_get.return_value = cluster - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy.pre_op(cluster_id='CLUSTER_ID', action=action) - - self.assertIsNone(res) - m_candidates.assert_not_called() - - @mock.patch.object( - lb_policy.LoadBalancingPolicy, '_get_delete_candidates' - ) - @mock.patch.object(co.Cluster, 'get') - def test_pre_op_cluster_scale_in_at_min_threshold( - self, m_cluster_get, m_candidates, *args, **kwargs - ): - cluster = mock.Mock( - user='user1', - project='project1', - desired_capacity=1, - min_size=1 - ) - action = mock.Mock( - action=consts.CLUSTER_SCALE_IN, - entity=cluster, - context='action_context' - ) - - m_cluster_get.return_value = cluster - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy.pre_op(cluster_id='CLUSTER_ID', action=action) - - self.assertIsNone(res) - m_candidates.assert_not_called() - - @mock.patch.object( - lb_policy.LoadBalancingPolicy, '_get_delete_candidates' - ) - @mock.patch.object(co.Cluster, 'get') - def test_pre_op_cluster_resize_at_min_threshold( - self, m_cluster_get, m_candidates, *args, **kwargs - ): - cluster_id = 'CLUSTER_ID' - cluster = mock.Mock( - user='user1', - project='project1', - desired_capacity=1, - min_size=1 - ) - action = mock.Mock( - action=consts.CLUSTER_RESIZE, - entity=cluster, - context='action_context' - ) - - m_cluster_get.return_value = cluster - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy.pre_op(cluster_id=cluster_id, action=action) - - self.assertIsNone(res) - m_candidates.assert_called_once_with(cluster_id, action) - - @mock.patch.object( - lb_policy.LoadBalancingPolicy, '_get_delete_candidates' - ) - @mock.patch.object(co.Cluster, 'get') - def test_pre_op_node_delete_at_min_threshold( - self, m_cluster_get, m_candidates, *args, **kwargs - ): - cluster = mock.Mock( - user='user1', - project='project1', - desired_capacity=1, - min_size=1 - ) - node = mock.Mock() - action = mock.Mock( - action=consts.NODE_DELETE, - entity=node, - context='action_context' - ) - - m_cluster_get.return_value = cluster - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy.pre_op(cluster_id='CLUSTER_ID', action=action) - - self.assertIsNone(res) - m_candidates.assert_not_called() - - @mock.patch.object( - lb_policy.LoadBalancingPolicy, '_get_delete_candidates' - ) - @mock.patch.object(co.Cluster, 'get') - def test_pre_op_cluster_replace_at_min_threshold( - self, m_cluster_get, m_candidates, *args, **kwargs - ): - cluster_id = 'CLUSTER_ID' - cluster = mock.Mock( - user='user1', - project='project1', - desired_capacity=2, - min_size=2 - ) - action = mock.Mock( - action=consts.CLUSTER_REPLACE_NODES, - entity=cluster, - context='action_context', - ) - - m_cluster_get.return_value = cluster - - policy = lb_policy.LoadBalancingPolicy('test-policy', self.spec) - res = policy.pre_op(cluster_id=cluster_id, action=action) - - self.assertIsNone(res) - m_candidates.assert_called_once_with(cluster_id, 
action) diff --git a/senlin/tests/unit/policies/test_policy.py b/senlin/tests/unit/policies/test_policy.py deleted file mode 100644 index 75fa8ba29..000000000 --- a/senlin/tests/unit/policies/test_policy.py +++ /dev/null @@ -1,601 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_context import context as oslo_ctx -from oslo_utils import timeutils - -from senlin.common import consts -from senlin.common import context as senlin_ctx -from senlin.common import exception -from senlin.common import schema -from senlin.common import utils as common_utils -from senlin.engine import environment -from senlin.engine import parser -from senlin.objects import credential as co -from senlin.objects import policy as po -from senlin.policies import base as pb -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -UUID1 = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d' -UUID2 = '2c5139a6-24ba-4a6f-bd53-a268f61536de' - -sample_policy = """ - type: senlin.policy.dummy - version: 1.0 - properties: - key1: value1 - key2: 2 -""" - - -class DummyPolicy(pb.Policy): - VERSION = '1.0' - - properties_schema = { - 'key1': schema.String( - 'first key', - default='value1' - ), - 'key2': schema.Integer( - 'second key', - required=True, - ), - } - - def __init__(self, name, spec, **kwargs): - super(DummyPolicy, self).__init__(name, spec, **kwargs) - - -class TestPolicyBase(base.SenlinTestCase): - - def setUp(self): - super(TestPolicyBase, self).setUp() - self.ctx = utils.dummy_context() - environment.global_env().register_policy('senlin.policy.dummy-1.0', - DummyPolicy) - environment.global_env().register_policy('senlin.policy.dummy-1.1', - DummyPolicy) - self.spec = parser.simple_parse(sample_policy) - - def _create_policy(self, policy_name, policy_id=None): - policy = pb.Policy(policy_name, self.spec, - user=self.ctx.user_id, - project=self.ctx.project_id, - domain=self.ctx.domain_id) - if policy_id: - policy.id = policy_id - - return policy - - def _create_db_policy(self, **kwargs): - values = { - 'name': 'test-policy', - 'type': 'senlin.policy.dummy-1.0', - 'spec': self.spec, - 'created_at': timeutils.utcnow(True), - 'user': self.ctx.user_id, - 'project': self.ctx.project_id, - 'domain': self.ctx.domain_id, - } - - values.update(kwargs) - return po.Policy.create(self.ctx, values) - - def test_init(self): - policy = self._create_policy('test-policy') - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual(self.spec, policy.spec) - self.assertEqual('senlin.policy.dummy-1.0', policy.type) - self.assertEqual(self.ctx.user_id, policy.user) - self.assertEqual(self.ctx.project_id, policy.project) - self.assertEqual(self.ctx.domain_id, policy.domain) - self.assertEqual({}, policy.data) - self.assertIsNone(policy.created_at) - self.assertIsNone(policy.updated_at) - self.assertTrue(policy.singleton) - - spec_data = policy.spec_data - self.assertEqual('senlin.policy.dummy', spec_data['type']) - self.assertEqual('1.0', 
spec_data['version']) - self.assertEqual({'key1': 'value1', 'key2': 2}, - spec_data['properties']) - self.assertEqual({'key1': 'value1', 'key2': 2}, policy.properties) - - def test_init_version_as_float(self): - self.spec['version'] = 1.1 - policy = self._create_policy('test-policy') - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual(self.spec, policy.spec) - self.assertEqual('senlin.policy.dummy-1.1', policy.type) - self.assertEqual(self.ctx.user_id, policy.user) - self.assertEqual(self.ctx.project_id, policy.project) - self.assertEqual(self.ctx.domain_id, policy.domain) - self.assertEqual({}, policy.data) - self.assertIsNone(policy.created_at) - self.assertIsNone(policy.updated_at) - self.assertTrue(policy.singleton) - - spec_data = policy.spec_data - self.assertEqual('senlin.policy.dummy', spec_data['type']) - self.assertEqual('1.1', spec_data['version']) - self.assertEqual({'key1': 'value1', 'key2': 2}, - spec_data['properties']) - self.assertEqual({'key1': 'value1', 'key2': 2}, policy.properties) - - def test_init_version_as_string(self): - self.spec['version'] = '1.1' - policy = self._create_policy('test-policy') - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual(self.spec, policy.spec) - self.assertEqual('senlin.policy.dummy-1.1', policy.type) - self.assertEqual(self.ctx.user_id, policy.user) - self.assertEqual(self.ctx.project_id, policy.project) - self.assertEqual(self.ctx.domain_id, policy.domain) - self.assertEqual({}, policy.data) - self.assertIsNone(policy.created_at) - self.assertIsNone(policy.updated_at) - self.assertTrue(policy.singleton) - - spec_data = policy.spec_data - self.assertEqual('senlin.policy.dummy', spec_data['type']) - self.assertEqual('1.1', spec_data['version']) - self.assertEqual({'key1': 'value1', 'key2': 2}, - spec_data['properties']) - self.assertEqual({'key1': 'value1', 'key2': 2}, policy.properties) - - def test_policy_new_type_not_found(self): - bad_spec = { - 'type': 'bad-type', - 'version': '1.0', - 'properties': '', - } - - self.assertRaises(exception.ResourceNotFound, - pb.Policy, - 'test-policy', bad_spec) - - def test_load(self): - policy = utils.create_policy(self.ctx, UUID1) - result = pb.Policy.load(self.ctx, policy.id) - - self.assertEqual(policy.id, result.id) - self.assertEqual(policy.name, result.name) - self.assertEqual(policy.type, result.type) - self.assertEqual(policy.user, result.user) - self.assertEqual(policy.project, result.project) - self.assertEqual(policy.domain, result.domain) - self.assertEqual(policy.spec, result.spec) - self.assertEqual(policy.data, result.data) - self.assertEqual({'key1': 'value1', 'key2': 2}, result.properties) - - self.assertEqual(policy.created_at, result.created_at) - self.assertEqual(policy.updated_at, result.updated_at) - - def test_load_with_policy(self): - policy = utils.create_policy(self.ctx, UUID1) - expected = pb.Policy.load(self.ctx, policy.id) - - res = pb.Policy.load(self.ctx, db_policy=policy) - - self.assertIsNotNone(res) - self.assertEqual(expected.id, res.id) - - def test_load_diff_project(self): - policy = utils.create_policy(self.ctx, UUID1) - - new_ctx = utils.dummy_context(project='a-different-project') - self.assertRaises(exception.ResourceNotFound, - pb.Policy.load, - new_ctx, policy.id, None) - - res = pb.Policy.load(new_ctx, policy.id, project_safe=False) - self.assertIsNotNone(res) - self.assertEqual(policy.id, res.id) - - def test_load_not_found(self): - ex = 
self.assertRaises(exception.ResourceNotFound, - pb.Policy.load, - self.ctx, 'fake-policy', None) - self.assertEqual("The policy 'fake-policy' could not be found.", - str(ex)) - - ex = self.assertRaises(exception.ResourceNotFound, - pb.Policy.load, - self.ctx, None, None) - self.assertEqual("The policy 'None' could not be found.", - str(ex)) - - def test_delete(self): - policy = utils.create_policy(self.ctx, UUID1) - policy_id = policy.id - - res = pb.Policy.delete(self.ctx, policy_id) - self.assertIsNone(res) - self.assertRaises(exception.ResourceNotFound, - pb.Policy.load, - self.ctx, policy_id, None) - - def test_delete_not_found(self): - result = pb.Policy.delete(self.ctx, 'bogus') - self.assertIsNone(result) - - def test_store_for_create(self): - policy = self._create_policy('test-policy') - self.assertIsNone(policy.id) - - policy_id = policy.store(self.ctx) - self.assertIsNotNone(policy_id) - self.assertEqual(policy_id, policy.id) - - result = po.Policy.get(self.ctx, policy_id) - - self.assertIsNotNone(result) - self.assertEqual('test-policy', result.name) - self.assertEqual(policy_id, result.id) - self.assertEqual(policy.type, result.type) - self.assertEqual(policy.user, result.user) - self.assertEqual(policy.project, result.project) - self.assertEqual(policy.domain, result.domain) - self.assertEqual(policy.spec, result.spec) - self.assertEqual(policy.data, result.data) - - self.assertIsNotNone(result.created_at) - self.assertIsNone(result.updated_at) - - def test_store_for_update(self): - policy = self._create_policy('test-policy') - self.assertIsNone(policy.id) - policy_id = policy.store(self.ctx) - self.assertIsNotNone(policy_id) - self.assertEqual(policy_id, policy.id) - - # do an update - policy.name = 'test-policy-1' - policy.data = {'kk': 'vv'} - - new_id = policy.store(self.ctx) - self.assertEqual(policy_id, new_id) - - result = po.Policy.get(self.ctx, policy_id) - self.assertIsNotNone(result) - self.assertEqual('test-policy-1', result.name) - self.assertEqual({'kk': 'vv'}, policy.data) - self.assertIsNotNone(policy.created_at) - self.assertIsNotNone(policy.updated_at) - - def test_to_dict(self): - policy = self._create_policy('test-policy') - policy_id = policy.store(self.ctx) - self.assertIsNotNone(policy_id) - expected = { - 'id': policy_id, - 'name': policy.name, - 'type': policy.type, - 'user': policy.user, - 'project': policy.project, - 'domain': policy.domain, - 'spec': policy.spec, - 'data': policy.data, - 'created_at': common_utils.isotime(policy.created_at), - 'updated_at': None, - } - - result = pb.Policy.load(self.ctx, policy_id=policy.id) - self.assertEqual(expected, result.to_dict()) - - def test_get_schema(self): - expected = { - 'key1': { - 'default': 'value1', - 'description': 'first key', - 'required': False, - 'updatable': False, - 'type': 'String' - }, - 'key2': { - 'description': 'second key', - 'required': True, - 'updatable': False, - 'type': 'Integer' - }, - } - res = DummyPolicy.get_schema() - self.assertEqual(expected, res) - - def test_build_policy_data(self): - policy = self._create_policy('test-policy') - data = {'key1': 'value1'} - res = policy._build_policy_data(data) - expect_result = { - 'DummyPolicy': { - 'version': '1.0', - 'data': data - } - } - self.assertEqual(expect_result, res) - - def test_extract_policy_data(self): - policy = self._create_policy('test-policy') - # Extract data correctly - data = {'key1': 'value1'} - policy_data = { - 'DummyPolicy': { - 'version': '1.0', - 'data': data - } - } - res = 
policy._extract_policy_data(policy_data) - self.assertEqual(data, res) - - # Policy class name does not match - data = {'key1': 'value1'} - policy_data = { - 'FakePolicy': { - 'version': '1.0', - 'data': data - } - } - res = policy._extract_policy_data(policy_data) - self.assertIsNone(res) - - # Policy version does not match - data = {'key1': 'value1'} - policy_data = { - 'DummyPolicy': { - 'version': '2.0', - 'data': data - } - } - res = policy._extract_policy_data(policy_data) - self.assertIsNone(res) - - @mock.patch.object(pb.Policy, '_build_conn_params') - @mock.patch('senlin.drivers.base.SenlinDriver') - def test_keystone(self, mock_sd, mock_params): - policy = self._create_policy('test-policy') - fake_params = mock.Mock() - mock_params.return_value = fake_params - kc = mock.Mock() - driver = mock.Mock() - driver.identity.return_value = kc - mock_sd.return_value = driver - - res = policy.keystone('user1', 'project1') - - self.assertEqual(kc, res) - self.assertEqual(kc, policy._keystoneclient) - mock_params.assert_called_once_with('user1', 'project1') - mock_sd.assert_called_once_with() - driver.identity.assert_called_once_with(fake_params) - - def test_keystone_already_initialized(self): - policy = self._create_policy('test-policy') - x_keystone = mock.Mock() - policy._keystoneclient = x_keystone - - result = policy.keystone('foo', 'bar') - - self.assertEqual(x_keystone, result) - - @mock.patch.object(pb.Policy, '_build_conn_params') - @mock.patch("senlin.drivers.base.SenlinDriver") - def test_nova(self, mock_driver, mock_params): - policy = self._create_policy('test-policy') - fake_params = mock.Mock() - mock_params.return_value = fake_params - x_driver = mock.Mock() - mock_driver.return_value = x_driver - - result = policy.nova('user1', 'project1') - - x_nova = x_driver.compute.return_value - self.assertEqual(x_nova, result) - self.assertEqual(x_nova, policy._novaclient) - mock_params.assert_called_once_with('user1', 'project1') - x_driver.compute.assert_called_once_with(fake_params) - - def test_nova_already_initialized(self): - policy = self._create_policy('test-policy') - x_nova = mock.Mock() - policy._novaclient = x_nova - - result = policy.nova('foo', 'bar') - - self.assertEqual(x_nova, result) - - @mock.patch.object(pb.Policy, '_build_conn_params') - @mock.patch("senlin.drivers.base.SenlinDriver") - def test_network(self, mock_driver, mock_params): - policy = self._create_policy('test-policy') - fake_params = mock.Mock() - mock_params.return_value = fake_params - x_driver = mock.Mock() - mock_driver.return_value = x_driver - - result = policy.network('user1', 'project1') - - x_network = x_driver.network.return_value - self.assertEqual(x_network, result) - self.assertEqual(x_network, policy._networkclient) - mock_params.assert_called_once_with('user1', 'project1') - x_driver.network.assert_called_once_with(fake_params) - - def test_network_already_initialized(self): - policy = self._create_policy('test-policy') - x_network = mock.Mock() - policy._networkclient = x_network - - result = policy.network('foo', 'bar') - - self.assertEqual(x_network, result) - - @mock.patch.object(pb.Policy, '_build_conn_params') - @mock.patch("senlin.drivers.base.SenlinDriver") - def test_lbaas(self, mock_driver, mock_params): - policy = self._create_policy('test-policy') - fake_params = mock.Mock() - mock_params.return_value = fake_params - x_driver = mock.Mock() - mock_driver.return_value = x_driver - - result = policy.lbaas('user1', 'project1') - - x_lbaas = x_driver.loadbalancing.return_value - 
self.assertEqual(x_lbaas, result) - self.assertEqual(x_lbaas, policy._lbaasclient) - mock_params.assert_called_once_with('user1', 'project1') - x_driver.loadbalancing.assert_called_once_with(fake_params) - - def test_lbaas_already_initialized(self): - policy = self._create_policy('test-policy') - x_lbaas = mock.Mock() - policy._lbaasclient = x_lbaas - - result = policy.lbaas('foo', 'bar') - - self.assertEqual(x_lbaas, result) - - def test_default_need_check(self): - action = mock.Mock() - action.action = consts.CLUSTER_SCALE_IN - action.data = {} - - policy = self._create_policy('test-policy') - res = policy.need_check('BEFORE', action) - self.assertTrue(res) - - setattr(policy, 'TARGET', [('BEFORE', consts.CLUSTER_SCALE_IN)]) - res = policy.need_check('BEFORE', action) - self.assertTrue(res) - res = policy.need_check('AFTER', action) - self.assertFalse(res) - - def test_default_pre_op(self): - policy = self._create_policy('test-policy') - res = policy.pre_op('CLUSTER_ID', 'FOO') - self.assertIsNone(res) - - def test_default_post_op(self): - policy = self._create_policy('test-policy') - res = policy.post_op('CLUSTER_ID', 'FOO') - self.assertIsNone(res) - - def test_default_attach(self): - cluster = mock.Mock() - policy = self._create_policy('test-policy') - - # Policy targets on ANY profile types - policy.PROFILE_TYPE = ['ANY'] - res = policy.attach(cluster) - self.assertEqual((True, None), res) - - # Profile type of cluster is not in policy's target scope - profile = mock.Mock() - profile.type = 'os.nova.server' - cluster.rt = {'profile': profile} - policy.PROFILE_TYPE = ['os.heat.resource'] - msg = 'Policy not applicable on profile type: os.nova.server' - res = policy.attach(cluster) - self.assertEqual((False, msg), res) - - # Attaching succeed - policy.PROFILE_TYPE = ['os.nova.server', 'os.heat.resource'] - res = policy.attach(cluster) - self.assertEqual((True, None), res) - - def test_default_detach(self): - cluster = mock.Mock() - policy = self._create_policy('test-policy') - - res = policy.detach(cluster) - self.assertEqual((True, None), res) - - @mock.patch.object(co.Credential, 'get') - @mock.patch.object(senlin_ctx, 'get_service_credentials') - @mock.patch.object(oslo_ctx, 'get_current') - def test_build_conn_params(self, mock_get_current, mock_get_service_creds, - mock_cred_get): - service_cred = { - 'auth_url': 'AUTH_URL', - 'username': 'senlin', - 'user_domain_name': 'default', - 'password': '123', - 'project_domain_name': 'Domain', - 'verify': True, - 'interface': 'Public', - } - current_ctx = { - 'auth_url': 'auth_url', - 'user_name': 'user1', - 'user_domain_name': 'default', - 'password': '456' - } - cred_info = { - 'openstack': { - 'trust': 'TRUST_ID', - } - } - - cred = mock.Mock(cred=cred_info) - mock_get_service_creds.return_value = service_cred - mock_get_current.return_value = current_ctx - mock_cred_get.return_value = cred - policy = self._create_policy('test-policy') - - res = policy._build_conn_params('user1', 'project1') - - expected_result = { - 'auth_url': 'AUTH_URL', - 'username': 'senlin', - 'user_domain_name': 'default', - 'password': '123', - 'trust_id': 'TRUST_ID', - 'project_domain_name': 'Domain', - 'verify': True, - 'interface': 'Public', - } - self.assertEqual(expected_result, res) - mock_get_service_creds.assert_called_once_with() - mock_cred_get.assert_called_once_with(current_ctx, 'user1', 'project1') - - @mock.patch.object(co.Credential, 'get') - @mock.patch.object(senlin_ctx, 'get_service_credentials') - @mock.patch.object(oslo_ctx, 'get_current') - 
def test_build_conn_params_trust_not_found( - self, mock_get_current, mock_get_service_creds, mock_cred_get): - - service_cred = { - 'auth_url': 'AUTH_URL', - 'username': 'senlin', - 'user_domain_name': 'default', - 'password': '123' - } - - mock_get_service_creds.return_value = service_cred - mock_cred_get.return_value = None - policy = self._create_policy('test-policy') - - ex = self.assertRaises(exception.TrustNotFound, - policy._build_conn_params, - 'user1', 'project1') - - msg = "The trust for trustor 'user1' could not be found." - self.assertEqual(msg, str(ex)) diff --git a/senlin/tests/unit/policies/test_region_placement.py b/senlin/tests/unit/policies/test_region_placement.py deleted file mode 100644 index 4e994f928..000000000 --- a/senlin/tests/unit/policies/test_region_placement.py +++ /dev/null @@ -1,426 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common import scaleutils as su -from senlin.engine import cluster as cm -from senlin.policies import base as pb -from senlin.policies import region_placement as rp -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestRegionPlacementPolicy(base.SenlinTestCase): - - def setUp(self): - super(TestRegionPlacementPolicy, self).setUp() - self.context = utils.dummy_context() - self.spec = { - 'type': 'senlin.policy.region_placement', - 'version': '1.0', - 'properties': { - 'regions': [ - {'name': 'R1', 'weight': 100, 'cap': 50}, - {'name': 'R2', 'weight': 50, 'cap': 50}, - {'name': 'R3', 'weight': 30, 'cap': -1}, - {'name': 'R4', 'weight': 20, 'cap': -1} - ] - } - } - - def test_policy_init(self): - policy = rp.RegionPlacementPolicy('test-policy', self.spec) - - self.assertIsNone(policy.id) - self.assertIsNone(policy. 
_keystoneclient) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.region_placement-1.0', policy.type) - expected = { - 'R1': { - 'weight': 100, - 'cap': 50 - }, - 'R2': { - 'weight': 50, - 'cap': 50, - }, - 'R3': { - 'weight': 30, - 'cap': -1, - }, - 'R4': { - 'weight': 20, - 'cap': -1, - } - } - self.assertEqual(expected, policy.regions) - - @mock.patch.object(pb.Policy, 'validate') - def test_validate_okay(self, mock_base_validate): - policy = rp.RegionPlacementPolicy('test-policy', self.spec) - kc = mock.Mock() - kc.validate_regions.return_value = ['R1', 'R2', 'R3', 'R4'] - policy._keystoneclient = kc - ctx = mock.Mock(user='U1', project='P1') - - res = policy.validate(ctx, True) - - self.assertTrue(res) - mock_base_validate.assert_called_once_with(ctx, True) - kc.validate_regions.assert_called_once_with(['R1', 'R2', 'R3', 'R4']) - - @mock.patch.object(pb.Policy, 'validate') - def test_validate_no_validate_props(self, mock_base_validate): - policy = rp.RegionPlacementPolicy('test-policy', self.spec) - ctx = mock.Mock(user='U1', project='P1') - - res = policy.validate(ctx, False) - - self.assertTrue(res) - mock_base_validate.assert_called_once_with(ctx, False) - - @mock.patch.object(pb.Policy, 'validate') - def test_validate_region_not_found(self, mock_base_validate): - policy = rp.RegionPlacementPolicy('test-policy', self.spec) - kc = mock.Mock() - kc.validate_regions.return_value = ['R2', 'R4'] - policy._keystoneclient = kc - ctx = mock.Mock(user='U1', project='P1') - - ex = self.assertRaises(exc.InvalidSpec, - policy.validate, - ctx, True) - - mock_base_validate.assert_called_once_with(ctx, True) - kc.validate_regions.assert_called_once_with(['R1', 'R2', 'R3', 'R4']) - self.assertEqual("The specified regions '['R1', 'R3']' could not " - "be found.", str(ex)) - - def test_create_plan(self): - policy = rp.RegionPlacementPolicy('p1', self.spec) - regions = policy.regions - - current = {'R1': 2, 'R2': 2, 'R3': 2, 'R4': 1} - result = policy._create_plan(current, regions, 5, True) - expected = {'R1': 4, 'R2': 1} - self.assertEqual(expected, result) - - current = {'R1': 2, 'R2': 2, 'R3': 0, 'R4': 1} - plan = policy._create_plan(current, regions, 5, True) - answer = {'R1': 3, 'R2': 1, 'R3': 1} - self.assertEqual(answer, plan) - - current = {'R1': 2, 'R2': 2, 'R3': 0, 'R4': 1} - plan = policy._create_plan(current, regions, 3, False) - answer = {'R2': 2, 'R4': 1} - self.assertEqual(answer, plan) - - current = {'R1': 4, 'R2': 2, 'R3': 1, 'R4': 1} - plan = policy._create_plan(current, regions, 3, False) - answer = {'R2': 1, 'R3': 1, 'R4': 1} - self.assertEqual(answer, plan) - - def test_get_count_node_create_no_region(self): - x_profile = mock.Mock(CONTEXT='context', properties={'context': {}}) - x_node = mock.Mock(rt={'profile': x_profile}) - action = mock.Mock(action=consts.NODE_CREATE, entity=x_node) - - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(1, res) - - def test_get_count_node_create_region_specified(self): - x_profile = mock.Mock(CONTEXT='context', - properties={'context': {'region_name': 'foo'}}) - x_node = mock.Mock(rt={'profile': x_profile}) - action = mock.Mock(action=consts.NODE_CREATE, entity=x_node) - - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(0, res) - - def test_get_count_resize_deletion(self): - action = mock.Mock(action=consts.CLUSTER_RESIZE, - data={'deletion': {'count': 3}}) - - policy = 
rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-3, res) - - def test_get_count_resize_creation(self): - action = mock.Mock(action=consts.CLUSTER_RESIZE, - data={'creation': {'count': 3}}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - - self.assertEqual(3, res) - - @mock.patch.object(su, 'parse_resize_params') - def test_get_count_resize_parse_error(self, mock_parse): - x_cluster = mock.Mock() - x_cluster.nodes = [mock.Mock(), mock.Mock()] - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - action.entity = x_cluster - mock_parse.return_value = (pb.CHECK_ERROR, 'Something wrong.') - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - - self.assertEqual(0, res) - self.assertEqual(pb.CHECK_ERROR, action.data['status']) - mock_parse.assert_called_once_with(action, x_cluster, 2) - self.assertEqual('Something wrong.', action.data['reason']) - - @mock.patch.object(su, 'parse_resize_params') - def test_get_count_resize_parse_creation(self, mock_parse): - def fake_parse(action, cluster, current): - action.data = {'creation': {'count': 3}} - return pb.CHECK_OK, '' - - x_cluster = mock.Mock() - x_cluster.nodes = [] - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - action.entity = x_cluster - - mock_parse.side_effect = fake_parse - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - - self.assertEqual(3, res) - mock_parse.assert_called_once_with(action, x_cluster, 0) - - @mock.patch.object(su, 'parse_resize_params') - def test_get_count_resize_parse_deletion(self, mock_parse): - def fake_parse(action, cluster, current): - action.data = {'deletion': {'count': 3}} - return pb.CHECK_OK, '' - - x_cluster = mock.Mock() - x_cluster.nodes = [mock.Mock(), mock.Mock(), mock.Mock()] - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - action.entity = x_cluster - - mock_parse.side_effect = fake_parse - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - - self.assertEqual(-3, res) - mock_parse.assert_called_once_with(action, x_cluster, 3) - - def test_get_count_scale_in_with_data(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, - data={'deletion': {'count': 3}}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-3, res) - - def test_get_count_scale_in_with_no_data(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, - data={'deletion': {'num': 3}}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-1, res) - - def test_get_count_scale_in_with_inputs(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={}, - inputs={'count': 3}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-3, res) - - def test_get_count_scale_in_with_incorrect_inputs(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={}, - inputs={'num': 3}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-1, res) - - def test_get_count_scale_out_with_data(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, - data={'creation': {'count': 3}}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(3, res) - - def 
test_get_count_scale_out_with_no_data(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, - data={'creation': {'num': 3}}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(1, res) - - def test_get_count_scale_out_with_inputs(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={}, - inputs={'count': 3}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(3, res) - - def test_get_count_scale_out_with_incorrect_inputs(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={}, - inputs={'num': 3}) - policy = rp.RegionPlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(1, res) - - @mock.patch.object(cm.Cluster, 'load') - def test_pre_op(self, mock_load): - # test pre_op method whether returns the correct action.data - policy = rp.RegionPlacementPolicy('p1', self.spec) - regions = policy.regions - - kc = mock.Mock() - kc.validate_regions.return_value = regions.keys() - policy._keystoneclient = kc - - plan = {'R1': 1, 'R3': 2} - self.patchobject(policy, '_create_plan', return_value=plan) - - action = mock.Mock() - action.context = self.context - action.action = 'CLUSTER_SCALE_OUT' - action.inputs = {} - action.data = { - 'creation': { - 'count': 3, - } - } - - cluster = mock.Mock() - current_dist = {'R1': 0, 'R2': 0, 'R3': 0, 'R4': 0} - cluster.get_region_distribution.return_value = current_dist - mock_load.return_value = cluster - - res = policy.pre_op('FAKE_CLUSTER', action) - - self.assertIsNone(res) - - self.assertEqual(3, action.data['creation']['count']) - dist = action.data['creation']['regions'] - self.assertEqual(2, len(dist)) - self.assertEqual(1, dist['R1']) - self.assertEqual(2, dist['R3']) - - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER') - kc.validate_regions.assert_called_once_with(regions.keys()) - cluster.get_region_distribution.assert_called_once_with(regions.keys()) - policy._create_plan.assert_called_once_with( - current_dist, regions, 3, True) - - @mock.patch.object(cm.Cluster, 'load') - def test_pre_op_count_from_inputs(self, mock_load): - # test pre_op method whether returns the correct action.data - policy = rp.RegionPlacementPolicy('p1', self.spec) - regions = policy.regions - - kc = mock.Mock() - kc.validate_regions.return_value = regions.keys() - policy._keystoneclient = kc - - cluster = mock.Mock() - current_dist = {'R1': 0, 'R2': 0, 'R3': 0, 'R4': 0} - cluster.get_region_distribution.return_value = current_dist - mock_load.return_value = cluster - - plan = {'R1': 1, 'R3': 2} - self.patchobject(policy, '_create_plan', return_value=plan) - - action = mock.Mock() - action.context = self.context - action.action = 'CLUSTER_SCALE_OUT' - action.inputs = {'count': 3} - action.data = {} - - res = policy.pre_op('FAKE_CLUSTER', action) - - self.assertIsNone(res) - self.assertEqual(3, action.data['creation']['count']) - dist = action.data['creation']['regions'] - self.assertEqual(2, len(dist)) - self.assertEqual(1, dist['R1']) - self.assertEqual(2, dist['R3']) - - @mock.patch.object(cm.Cluster, 'load') - def test_pre_op_no_regions(self, mock_load): - # test pre_op method whether returns the correct action.data - policy = rp.RegionPlacementPolicy('p1', self.spec) - kc = mock.Mock() - kc.validate_regions.return_value = [] - policy._keystoneclient = kc - - action = mock.Mock() - action.action = 'CLUSTER_SCALE_OUT' - action.context = self.context - action.data = 
{'creation': {'count': 3}} - - cluster = mock.Mock() - mock_load.return_value = cluster - - res = policy.pre_op('FAKE_CLUSTER', action) - - self.assertIsNone(res) - self.assertEqual('ERROR', action.data['status']) - self.assertEqual('No region is found usable.', action.data['reason']) - - @mock.patch.object(cm.Cluster, 'load') - def test_pre_op_no_feasible_plan(self, mock_load): - # test pre_op method whether returns the correct action.data - policy = rp.RegionPlacementPolicy('p1', self.spec) - regions = policy.regions - - kc = mock.Mock() - kc.validate_regions.return_value = regions.keys() - policy._keystoneclient = kc - - self.patchobject(policy, '_create_plan', return_value=None) - - action = mock.Mock() - action.action = 'CLUSTER_SCALE_OUT' - action.context = self.context - action.inputs = {} - action.data = {'creation': {'count': 3}} - - cluster = mock.Mock() - current_dist = {'R1': 0, 'R2': 0, 'R3': 0, 'R4': 0} - cluster.get_region_distribution.return_value = current_dist - mock_load.return_value = cluster - - res = policy.pre_op('FAKE_CLUSTER', action) - - self.assertIsNone(res) - - self.assertEqual('ERROR', action.data['status']) - self.assertEqual('There is no feasible plan to handle all nodes.', - action.data['reason']) - - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER') - kc.validate_regions.assert_called_once_with(regions.keys()) - cluster.get_region_distribution.assert_called_once_with(regions.keys()) - policy._create_plan.assert_called_once_with( - current_dist, regions, 3, True) diff --git a/senlin/tests/unit/policies/test_scaling_policy.py b/senlin/tests/unit/policies/test_scaling_policy.py deleted file mode 100644 index 8ec7c685a..000000000 --- a/senlin/tests/unit/policies/test_scaling_policy.py +++ /dev/null @@ -1,450 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
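(For orientation while reviewing this removal: the scaling-policy tests in the hunk below pin down Senlin's adjustment arithmetic. The following is a minimal sketch inferred from those assertions, not the retired module's code; the standalone function name and flat signature are assumptions made for illustration.)

    # Sketch of the behavior asserted by the _calculate_adjustment_count tests:
    # EXACT_CAPACITY yields the gap between the current size and the target
    # (sign flipped for scale-out), CHANGE_IN_CAPACITY uses the number
    # directly, and CHANGE_IN_PERCENTAGE scales the current size with a
    # min_step floor.
    def calculate_adjustment_count(current, adj_type, number,
                                   min_step=1, event='CLUSTER_SCALE_IN'):
        if adj_type == 'EXACT_CAPACITY':
            count = current - number
            if event == 'CLUSTER_SCALE_OUT':
                count = -count
        elif adj_type == 'CHANGE_IN_CAPACITY':
            count = number
        else:  # CHANGE_IN_PERCENTAGE
            count = int(current * number / 100.0)
            if count < min_step:
                count = min_step
        return count

    # E.g. calculate_adjustment_count(3, 'EXACT_CAPACITY', 1) == 2 and
    # calculate_adjustment_count(10, 'CHANGE_IN_PERCENTAGE', 1, min_step=2) == 2,
    # matching the assertions in test_calculate_adjustment_count below.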
- -from unittest import mock - -from oslo_utils import timeutils -import time - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.objects import cluster_policy as cpo -from senlin.objects import node as no -from senlin.policies import base as pb -from senlin.policies import scaling_policy as sp -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - -PROFILE_ID = 'aa5f86b8-e52b-4f2b-828a-4c14c770938d' -CLUSTER_ID = '2c5139a6-24ba-4a6f-bd53-a268f61536de' -CLUSTER_NOMAXSIZE_ID = 'e470c11d-910d-491b-a7c3-93b047a6108d' - - -class TestScalingPolicy(base.SenlinTestCase): - - def setUp(self): - super(TestScalingPolicy, self).setUp() - self.context = utils.dummy_context() - self.spec = { - 'type': 'senlin.policy.scaling', - 'version': '1.0', - 'properties': { - 'event': 'CLUSTER_SCALE_IN', - 'adjustment': { - 'type': 'CHANGE_IN_CAPACITY', - 'number': 1, - 'min_step': 1, - 'best_effort': False, - 'cooldown': 3, - } - } - } - self.profile = utils.create_profile(self.context, PROFILE_ID) - self.cluster = utils.create_cluster(self.context, CLUSTER_ID, - PROFILE_ID) - self.cluster_no_maxsize = utils.create_cluster( - self.context, CLUSTER_NOMAXSIZE_ID, PROFILE_ID, max_size=-1) - - def _create_nodes(self, count): - NODE_IDS = [ - '6eaa45fa-bd2e-426d-ae49-f75db1a4bd73', - '8bf73953-b57b-4e6b-bdef-83fa9420befb', - 'c3058ea0-5241-466b-89bc-6a85f6050a11', - ] - PHYSICAL_IDS = [ - '2417c5d6-9a89-4637-9ba6-82c00b180cb7', - '374bf2b9-30ba-4a9b-822b-1196f6d4a368', - '2a1b7e37-de18-4b22-9489-a7a413fdfe48', - ] - - nodes = [] - for i in range(count): - node = utils.create_node(self.context, NODE_IDS[i], PROFILE_ID, - CLUSTER_ID, PHYSICAL_IDS[i]) - nodes.append(node) - return nodes - - def test_policy_init(self): - policy = sp.ScalingPolicy('p1', self.spec) - self.assertFalse(policy.singleton) - - self.assertIsNone(policy.id) - self.assertEqual('p1', policy.name) - self.assertEqual('senlin.policy.scaling-1.0', policy.type) - self.assertEqual('CLUSTER_SCALE_IN', policy.event) - adjustment = self.spec['properties']['adjustment'] - self.assertEqual(adjustment['type'], policy.adjustment_type) - self.assertEqual(adjustment['number'], policy.adjustment_number) - self.assertEqual(adjustment['min_step'], policy.adjustment_min_step) - self.assertEqual(adjustment['best_effort'], policy.best_effort) - self.assertEqual(adjustment['cooldown'], policy.cooldown) - - def test_policy_init_default_value(self): - self.spec['properties']['adjustment'] = {} - policy = sp.ScalingPolicy('p1', self.spec) - - self.assertIsNone(policy.id) - self.assertEqual('senlin.policy.scaling-1.0', policy.type) - self.assertEqual('p1', policy.name) - self.assertEqual(consts.CHANGE_IN_CAPACITY, policy.adjustment_type) - self.assertEqual(1, policy.adjustment_number) - self.assertEqual(1, policy.adjustment_min_step) - self.assertFalse(policy.best_effort) - self.assertEqual(0, policy.cooldown) - - def test_validate(self): - self.spec['properties']['adjustment'] = {} - policy = sp.ScalingPolicy('p1', self.spec) - - policy.validate(self.context) - - def test_validate_bad_number(self): - self.spec['properties']['adjustment'] = {"number": -1} - policy = sp.ScalingPolicy('p1', self.spec) - - ex = self.assertRaises(exc.InvalidSpec, policy.validate, self.context) - - self.assertEqual("the 'number' for 'adjustment' must be > 0", - str(ex)) - - def test_validate_bad_min_step(self): - self.spec['properties']['adjustment'] = {"min_step": -1} - policy = sp.ScalingPolicy('p1', self.spec) - - ex = 
self.assertRaises(exc.InvalidSpec, policy.validate, self.context) - - self.assertEqual("the 'min_step' for 'adjustment' must be >= 0", - str(ex)) - - def test_validate_bad_cooldown(self): - self.spec['properties']['adjustment'] = {"cooldown": -1} - policy = sp.ScalingPolicy('p1', self.spec) - - ex = self.assertRaises(exc.InvalidSpec, policy.validate, self.context) - - self.assertEqual("the 'cooldown' for 'adjustment' must be >= 0", - str(ex)) - - def test_calculate_adjustment_count(self): - adjustment = self.spec['properties']['adjustment'] - # adjustment_type as EXACT_CAPACITY and event as cluster_scale_in - current_size = 3 - adjustment['type'] = consts.EXACT_CAPACITY - adjustment['number'] = 1 - policy = sp.ScalingPolicy('test-policy', self.spec) - policy.event = consts.CLUSTER_SCALE_IN - count = policy._calculate_adjustment_count(current_size) - self.assertEqual(2, count) - - # adjustment_type as EXACT_CAPACITY and event as cluster_scale_out - current_size = 3 - adjustment['type'] = consts.EXACT_CAPACITY - adjustment['number'] = 1 - policy = sp.ScalingPolicy('test-policy', self.spec) - policy.event = consts.CLUSTER_SCALE_OUT - count = policy._calculate_adjustment_count(current_size) - self.assertEqual(-2, count) - - # adjustment_type is CHANGE_IN_CAPACITY - adjustment['type'] = consts.CHANGE_IN_CAPACITY - adjustment['number'] = 1 - policy = sp.ScalingPolicy('test-policy', self.spec) - count = policy._calculate_adjustment_count(current_size) - self.assertEqual(1, count) - - # adjustment_type is CHANGE_IN_PERCENTAGE - current_size = 10 - adjustment['type'] = consts.CHANGE_IN_PERCENTAGE - adjustment['number'] = 50 - policy = sp.ScalingPolicy('test-policy', self.spec) - count = policy._calculate_adjustment_count(current_size) - self.assertEqual(5, count) - - # adjustment_type is CHANGE_IN_PERCENTAGE and min_step is 2 - adjustment['type'] = consts.CHANGE_IN_PERCENTAGE - adjustment['number'] = 1 - adjustment['min_step'] = 2 - policy = sp.ScalingPolicy('test-policy', self.spec) - count = policy._calculate_adjustment_count(current_size) - self.assertEqual(2, count) - - def test_pre_op_pass_without_input(self): - nodes = self._create_nodes(3) - self.cluster.nodes = nodes - - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_IN - action.inputs = {} - action.entity = self.cluster - - adjustment = self.spec['properties']['adjustment'] - adjustment['type'] = consts.EXACT_CAPACITY - adjustment['number'] = 1 - policy = sp.ScalingPolicy('test-policy', self.spec) - - policy.pre_op(self.cluster['id'], action) - pd = { - 'deletion': { - 'count': 2, - }, - 'reason': 'Scaling request validated.', - 'status': pb.CHECK_OK, - } - action.data.update.assert_called_with(pd) - action.store.assert_called_with(self.context) - - def test_pre_op_pass_with_input(self): - nodes = self._create_nodes(3) - self.cluster.nodes = nodes - - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_IN - action.inputs = {'count': 1, 'last_op': timeutils.utcnow(True)} - action.entity = self.cluster - - adjustment = self.spec['properties']['adjustment'] - adjustment['type'] = consts.CHANGE_IN_CAPACITY - adjustment['number'] = 2 - adjustment['cooldown'] = 1 - policy = sp.ScalingPolicy('p1', self.spec) - - time.sleep(1) - - policy.pre_op(self.cluster['id'], action) - pd = { - 'deletion': { - 'count': 1, - }, - 'reason': 'Scaling request validated.', - 'status': pb.CHECK_OK, - } - action.data.update.assert_called_with(pd) - 
action.store.assert_called_with(self.context) - - # count value is string rather than integer - action.inputs = {'count': '1'} - policy.pre_op(self.cluster['id'], action) - pd = { - 'deletion': { - 'count': 1, - }, - 'reason': 'Scaling request validated.', - 'status': pb.CHECK_OK, - } - action.data.update.assert_called_with(pd) - - def test_pre_op_within_cooldown(self): - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_IN - action.inputs = {'last_op': timeutils.utcnow(True)} - action.entity = self.cluster - - adjustment = self.spec['properties']['adjustment'] - adjustment['cooldown'] = 300 - kwargs = {'id': "FAKE_ID"} - policy = sp.ScalingPolicy('p1', self.spec, **kwargs) - - policy.pre_op('FAKE_CLUSTER_ID', action) - pd = { - 'status': pb.CHECK_ERROR, - 'reason': "Policy FAKE_ID cooldown is still in progress.", - } - action.data.update.assert_called_with(pd) - action.store.assert_called_with(self.context) - - @mock.patch.object(sp.ScalingPolicy, '_calculate_adjustment_count') - def test_pre_op_pass_check_effort(self, mock_adjustmentcount): - # Cluster with maxsize and best_effort is False - self.cluster.nodes = [mock.Mock(), mock.Mock()] - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_OUT - action.inputs = {} - action.entity = self.cluster - - mock_adjustmentcount.return_value = 1 - policy = sp.ScalingPolicy('test-policy', self.spec) - policy.event = consts.CLUSTER_SCALE_OUT - policy.best_effort = True - policy.pre_op(self.cluster_no_maxsize['id'], action) - pd = { - 'creation': { - 'count': 1, - }, - 'reason': 'Scaling request validated.', - 'status': pb.CHECK_OK, - } - action.data.update.assert_called_with(pd) - action.store.assert_called_with(self.context) - - def test_pre_op_fail_negative_count(self): - nodes = self._create_nodes(3) - self.cluster.nodes = nodes - - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_IN - action.inputs = {} - action.entity = self.cluster - - adjustment = self.spec['properties']['adjustment'] - adjustment['type'] = consts.EXACT_CAPACITY - adjustment['number'] = 5 - policy = sp.ScalingPolicy('test-policy', self.spec) - - policy.pre_op(self.cluster['id'], action) - - pd = { - 'status': pb.CHECK_ERROR, - 'reason': "Invalid count (-2) for action 'CLUSTER_SCALE_IN'.", - } - action.data.update.assert_called_with(pd) - action.store.assert_called_with(self.context) - - def test_pre_op_fail_below_min_size(self): - nodes = self._create_nodes(3) - self.cluster.nodes = nodes - - action = mock.Mock() - action.action = consts.CLUSTER_SCALE_IN - action.context = self.context - action.inputs = {} - action.entity = self.cluster - - adjustment = self.spec['properties']['adjustment'] - adjustment['type'] = consts.CHANGE_IN_CAPACITY - adjustment['number'] = 3 - policy = sp.ScalingPolicy('test-policy', self.spec) - - policy.pre_op(self.cluster['id'], action) - - pd = { - 'status': pb.CHECK_ERROR, - 'reason': ("The target capacity (0) is less than the cluster's " - "min_size (1)."), - } - action.data.update.assert_called_with(pd) - action.store.assert_called_with(self.context) - - def test_pre_op_pass_best_effort(self): - nodes = self._create_nodes(3) - self.cluster.nodes = nodes - - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_IN - action.inputs = {} - action.entity = self.cluster - - adjustment = self.spec['properties']['adjustment'] - adjustment['best_effort'] = True - adjustment['type'] = 
consts.CHANGE_IN_CAPACITY - adjustment['number'] = 3 - policy = sp.ScalingPolicy('test-policy', self.spec) - - policy.pre_op(self.cluster['id'], action) - - pd = { - 'deletion': { - 'count': 2, - }, - 'status': pb.CHECK_OK, - 'reason': 'Scaling request validated.', - } - action.data.update.assert_called_with(pd) - action.store.assert_called_with(self.context) - - def test_pre_op_with_bad_nodes(self): - nodes = self._create_nodes(3) - no.Node.update(self.context, nodes[0].id, {'status': 'ERROR'}) - self.cluster.nodes = nodes - - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_IN - action.inputs = {} - action.entity = self.cluster - - adjustment = self.spec['properties']['adjustment'] - adjustment['type'] = consts.EXACT_CAPACITY - adjustment['number'] = 1 - policy = sp.ScalingPolicy('test-policy', self.spec) - - policy.pre_op(self.cluster['id'], action) - pd = { - 'deletion': { - 'count': 2, - }, - 'reason': 'Scaling request validated.', - 'status': pb.CHECK_OK, - } - action.data.update.assert_called_with(pd) - action.store.assert_called_with(self.context) - - @mock.patch.object(cpo.ClusterPolicy, 'update') - @mock.patch.object(timeutils, 'utcnow') - def test_post_op(self, mock_time, mock_cluster_policy): - action = mock.Mock() - action.context = self.context - - mock_time.return_value = 'FAKE_TIME' - - kwargs = {'id': 'FAKE_POLICY_ID'} - policy = sp.ScalingPolicy('test-policy', self.spec, **kwargs) - - policy.post_op('FAKE_CLUSTER_ID', action) - mock_cluster_policy.assert_called_once_with( - action.context, 'FAKE_CLUSTER_ID', 'FAKE_POLICY_ID', - {'last_op': 'FAKE_TIME'}) - - def test_need_check_in_event_before(self): - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_IN - action.data = {} - - policy = sp.ScalingPolicy('test-policy', self.spec) - res = policy.need_check('BEFORE', action) - self.assertTrue(res) - - def test_need_check_not_in_event_before(self): - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_OUT - action.data = {} - - policy = sp.ScalingPolicy('test-policy', self.spec) - res = policy.need_check('BEFORE', action) - self.assertFalse(res) - - def test_need_check_in_event_after(self): - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_SCALE_OUT - action.data = {} - - policy = sp.ScalingPolicy('test-policy', self.spec) - res = policy.need_check('AFTER', action) - self.assertTrue(res) - - def test_need_check_not_in_event_after(self): - action = mock.Mock() - action.context = self.context - action.action = consts.CLUSTER_ATTACH_POLICY - action.data = {} - - policy = sp.ScalingPolicy('test-policy', self.spec) - res = policy.need_check('AFTER', action) - self.assertFalse(res) diff --git a/senlin/tests/unit/policies/test_zone_placement.py b/senlin/tests/unit/policies/test_zone_placement.py deleted file mode 100644 index 87f1edaa7..000000000 --- a/senlin/tests/unit/policies/test_zone_placement.py +++ /dev/null @@ -1,418 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.common import scaleutils as su -from senlin.engine import cluster as cluster_mod -from senlin.objects import cluster as co -from senlin.objects import node as no -from senlin.policies import base as policy_base -from senlin.policies import zone_placement as zp -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestZonePlacementPolicy(base.SenlinTestCase): - - def setUp(self): - super(TestZonePlacementPolicy, self).setUp() - self.context = utils.dummy_context() - self.spec = { - 'type': 'senlin.policy.zone_placement', - 'version': '1.0', - 'properties': { - 'zones': [ - {'name': 'AZ1', 'weight': 100}, - {'name': 'AZ2', 'weight': 80}, - {'name': 'AZ3', 'weight': 60}, - {'name': 'AZ4', 'weight': 40} - ] - } - } - - def test_policy_init(self): - policy = zp.ZonePlacementPolicy('test-policy', self.spec) - - self.assertIsNone(policy.id) - self.assertEqual('test-policy', policy.name) - self.assertEqual('senlin.policy.zone_placement-1.0', policy.type) - expected = {'AZ1': 100, 'AZ2': 80, 'AZ3': 60, 'AZ4': 40} - self.assertEqual(expected, policy.zones) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_okay(self, mock_base_validate): - policy = zp.ZonePlacementPolicy('test-policy', self.spec) - nc = mock.Mock() - nc.validate_azs.return_value = ['AZ1', 'AZ2', 'AZ3', 'AZ4'] - policy._novaclient = nc - ctx = mock.Mock(user='U1', project='P1') - - res = policy.validate(ctx, True) - - self.assertTrue(res) - mock_base_validate.assert_called_once_with(ctx, True) - nc.validate_azs.assert_called_once_with(['AZ1', 'AZ2', 'AZ3', 'AZ4']) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_no_validate_props(self, mock_base_validate): - policy = zp.ZonePlacementPolicy('test-policy', self.spec) - ctx = mock.Mock(user='U1', project='P1') - - res = policy.validate(ctx, False) - - self.assertTrue(res) - mock_base_validate.assert_called_once_with(ctx, False) - - @mock.patch.object(policy_base.Policy, 'validate') - def test_validate_az_not_found(self, mock_base_validate): - policy = zp.ZonePlacementPolicy('test-policy', self.spec) - nc = mock.Mock() - nc.validate_azs.return_value = ['AZ1', 'AZ4'] - policy._novaclient = nc - ctx = mock.Mock(user='U1', project='P1') - - ex = self.assertRaises(exc.InvalidSpec, - policy.validate, - ctx, True) - - mock_base_validate.assert_called_once_with(ctx, True) - nc.validate_azs.assert_called_once_with(['AZ1', 'AZ2', 'AZ3', 'AZ4']) - self.assertEqual("The specified name '['AZ2', 'AZ3']' " - "could not be found.", str(ex)) - - def test_create_plan_default(self): - self.spec['properties']['zones'] = [ - {'name': 'AZ1'}, {'name': 'AZ2'}, {'name': 'AZ3'}, {'name': 'AZ4'} - ] - policy = zp.ZonePlacementPolicy('test-policy', self.spec) - zones = policy.zones - - current = {'AZ1': 2, 'AZ2': 2, 'AZ3': 2, 'AZ4': 1} - plan = policy._create_plan(current, zones, 5, True) - answer = {'AZ1': 1, 'AZ2': 1, 'AZ3': 1, 'AZ4': 2} - self.assertEqual(answer, plan) - - def test_create_plan(self): - policy = zp.ZonePlacementPolicy('test-policy', self.spec) - zones = policy.zones - - current = {'AZ1': 2, 'AZ2': 2, 'AZ3': 2, 'AZ4': 1} - plan = policy._create_plan(current, zones, 7, True) - answer = {'AZ1': 3, 'AZ2': 2, 'AZ3': 1, 'AZ4': 1} - self.assertEqual(answer, plan) - - current = {'AZ1': 
2, 'AZ2': 4, 'AZ3': 2, 'AZ4': 2} - plan = policy._create_plan(current, zones, 6, True) - answer = {'AZ1': 4, 'AZ2': 1, 'AZ3': 1} - self.assertEqual(answer, plan) - - current = {'AZ1': 4, 'AZ2': 5, 'AZ3': 1, 'AZ4': 1} - plan = policy._create_plan(current, zones, 3, False) - answer = {'AZ2': 3} - self.assertEqual(answer, plan) - - current = {'AZ1': 6, 'AZ2': 2, 'AZ3': 4, 'AZ4': 6} - plan = policy._create_plan(current, zones, 4, False) - answer = {'AZ4': 4} - self.assertEqual(answer, plan) - - def test_get_count_node_create_with_zone(self): - x_profile = mock.Mock(AVAILABILITY_ZONE='availability_zone', - properties={'availability_zone': 'zone1'}) - x_node = mock.Mock(rt={'profile': x_profile}) - action = mock.Mock(action=consts.NODE_CREATE, entity=x_node) - - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(0, res) - - def test_get_count_node_create_without_zone(self): - x_profile = mock.Mock(AVAILABILITY_ZONE='availability_zone', - properties={'availability_zone': None}) - x_node = mock.Mock(rt={'profile': x_profile}) - action = mock.Mock(action=consts.NODE_CREATE, entity=x_node) - - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(1, res) - - def test_get_count_resize_deletion(self): - action = mock.Mock(action=consts.CLUSTER_RESIZE, - data={'deletion': {'count': 3}}) - - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-3, res) - - def test_get_count_resize_creation(self): - action = mock.Mock(action=consts.CLUSTER_RESIZE, - data={'creation': {'count': 3}}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(3, res) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(su, 'parse_resize_params') - @mock.patch.object(co.Cluster, 'get') - def test_get_count_resize_parse_error(self, mock_cluster, mock_parse, - mock_count): - x_cluster = mock.Mock() - mock_cluster.return_value = x_cluster - mock_count.return_value = 3 - mock_parse.return_value = (policy_base.CHECK_ERROR, 'Something wrong.') - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - - self.assertEqual(0, res) - self.assertEqual(policy_base.CHECK_ERROR, action.data['status']) - self.assertEqual('Something wrong.', action.data['reason']) - mock_cluster.assert_called_once_with(action.context, 'FOO') - mock_count.assert_called_once_with(action.context, 'FOO') - mock_parse.assert_called_once_with(action, x_cluster, 3) - - @mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(su, 'parse_resize_params') - @mock.patch.object(co.Cluster, 'get') - def test_get_count_resize_parse_creation(self, mock_cluster, mock_parse, - mock_count): - def fake_parse(action, cluster, current): - action.data = {'creation': {'count': 3}} - return policy_base.CHECK_OK, '' - - x_cluster = mock.Mock() - mock_cluster.return_value = x_cluster - mock_parse.side_effect = fake_parse - mock_count.return_value = 3 - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - - self.assertEqual(3, res) - mock_cluster.assert_called_once_with(action.context, 'FOO') - mock_count.assert_called_once_with(action.context, 'FOO') - mock_parse.assert_called_once_with(action, x_cluster, 3) - - 
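(The _get_count tests surrounding this point, in the region-placement hunk above and the zone-placement hunk here, fix a simple sign convention for scaling actions. A minimal sketch under that reading, with the helper name and flat signature assumed for illustration rather than taken from the retired policies, which also handled NODE_CREATE and CLUSTER_RESIZE:)

    # Creations count positive, deletions negative; a request that carries no
    # usable 'count' in either action.data or action.inputs falls back to a
    # single node.
    def get_count(action_name, data, inputs):
        if action_name == 'CLUSTER_SCALE_IN':
            count = data.get('deletion', {}).get('count') or inputs.get('count', 1)
            return -int(count)
        # CLUSTER_SCALE_OUT
        count = data.get('creation', {}).get('count') or inputs.get('count', 1)
        return int(count)

    # E.g. get_count('CLUSTER_SCALE_IN', {'deletion': {'count': 3}}, {}) == -3,
    # while get_count('CLUSTER_SCALE_IN', {'deletion': {'num': 3}}, {}) == -1,
    # matching test_get_count_scale_in_with_data / _with_no_data.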
@mock.patch.object(no.Node, 'count_by_cluster') - @mock.patch.object(su, 'parse_resize_params') - @mock.patch.object(co.Cluster, 'get') - def test_get_count_resize_parse_deletion(self, mock_cluster, mock_parse, - mock_count): - def fake_parse(action, cluster, current): - action.data = {'deletion': {'count': 3}} - return policy_base.CHECK_OK, '' - - x_cluster = mock.Mock() - mock_cluster.return_value = x_cluster - mock_count.return_value = 3 - mock_parse.side_effect = fake_parse - action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - - self.assertEqual(-3, res) - mock_cluster.assert_called_once_with(action.context, 'FOO') - mock_count.assert_called_once_with(action.context, 'FOO') - mock_parse.assert_called_once_with(action, x_cluster, 3) - - def test_get_count_scale_in_with_data(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, - data={'deletion': {'count': 3}}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-3, res) - - def test_get_count_scale_in_with_no_data(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, - data={'deletion': {'num': 3}}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-1, res) - - def test_get_count_scale_in_with_inputs(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={}, - inputs={'count': 3}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-3, res) - - def test_get_count_scale_in_with_incorrect_inputs(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={}, - inputs={'num': 3}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(-1, res) - - def test_get_count_scale_out_with_data(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, - data={'creation': {'count': 3}}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(3, res) - - def test_get_count_scale_out_with_no_data(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, - data={'creation': {'num': 3}}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(1, res) - - def test_get_count_scale_out_with_inputs(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={}, - inputs={'count': 3}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(3, res) - - def test_get_count_scale_out_with_incorrect_inputs(self): - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={}, - inputs={'num': 3}) - policy = zp.ZonePlacementPolicy('p1', self.spec) - - res = policy._get_count('FOO', action) - self.assertEqual(1, res) - - @mock.patch.object(cluster_mod.Cluster, 'load') - def test_pre_op_expand_using_input(self, mock_load): - policy = zp.ZonePlacementPolicy('test-policy', self.spec) - zones = policy.zones - - nc = mock.Mock() - nc.validate_azs.return_value = zones.keys() - policy._novaclient = nc - - action = mock.Mock() - action.action = 'CLUSTER_SCALE_OUT' - action.context = self.context - action.data = {} - action.inputs = {'count': 7} - - cluster = mock.Mock(user='user1', project='project1') - current_dist = {'AZ1': 2, 'AZ2': 3, 'AZ3': 2, 'AZ4': 1} - cluster.get_zone_distribution.return_value = current_dist - 
mock_load.return_value = cluster - - policy.pre_op('FAKE_CLUSTER', action) - - self.assertEqual(7, action.data['creation']['count']) - dist = action.data['creation']['zones'] - self.assertEqual(4, dist['AZ1']) - self.assertEqual(2, dist['AZ2']) - self.assertEqual(1, dist['AZ3']) - - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER') - nc.validate_azs.assert_called_once_with(zones.keys()) - cluster.get_zone_distribution.assert_called_once_with( - action.context, zones.keys()) - - @mock.patch.object(cluster_mod.Cluster, 'load') - def test_pre_op_shrink_using_data(self, mock_load): - policy = zp.ZonePlacementPolicy('test-policy', self.spec) - zones = policy.zones - - nc = mock.Mock() - nc.validate_azs.return_value = zones.keys() - policy._novaclient = nc - - action = mock.Mock(action=consts.CLUSTER_SCALE_IN, - context=self.context, inputs={}, - data={'deletion': {'count': 2}}) - - cluster = mock.Mock(user='user1', project='project1') - current_dist = {'AZ1': 2, 'AZ2': 2, 'AZ3': 2, 'AZ4': 1} - cluster.get_zone_distribution.return_value = current_dist - mock_load.return_value = cluster - - policy.pre_op('FAKE_CLUSTER', action) - - self.assertEqual(2, action.data['deletion']['count']) - dist = action.data['deletion']['zones'] - self.assertEqual(1, dist['AZ3']) - self.assertEqual(1, dist['AZ4']) - - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER') - nc.validate_azs.assert_called_once_with(zones.keys()) - cluster.get_zone_distribution.assert_called_once_with( - action.context, zones.keys()) - - @mock.patch.object(cluster_mod.Cluster, 'load') - def test_pre_op_no_zones(self, mock_load): - policy = zp.ZonePlacementPolicy('p1', self.spec) - nc = mock.Mock() - nc.validate_azs.return_value = [] - policy._novaclient = nc - - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, - context=self.context, - data={'creation': {'count': 3}}) - - cluster = mock.Mock() - mock_load.return_value = cluster - - res = policy.pre_op('FAKE_CLUSTER', action) - - self.assertIsNone(res) - self.assertEqual('ERROR', action.data['status']) - self.assertEqual('No availability zone found available.', - action.data['reason']) - - @mock.patch.object(cluster_mod.Cluster, 'load') - def test_pre_op_no_feasible_plan(self, mock_load): - policy = zp.ZonePlacementPolicy('p1', self.spec) - zones = policy.zones - - nc = mock.Mock() - nc.validate_azs.return_value = zones.keys() - policy._novaclient = nc - - self.patchobject(policy, '_create_plan', return_value=None) - - action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, - context=self.context, inputs={}, - data={'creation': {'count': 3}}) - - cluster = mock.Mock() - current_dist = {'R1': 0, 'R2': 0, 'R3': 0, 'R4': 0} - cluster.get_zone_distribution.return_value = current_dist - mock_load.return_value = cluster - - res = policy.pre_op('FAKE_CLUSTER', action) - - self.assertIsNone(res) - - self.assertEqual('ERROR', action.data['status']) - self.assertEqual('There is no feasible plan to handle all nodes.', - action.data['reason']) - - mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER') - nc.validate_azs.assert_called_once_with(zones.keys()) - cluster.get_zone_distribution.assert_called_once_with( - action.context, zones.keys()) - policy._create_plan.assert_called_once_with( - current_dist, zones, 3, True) diff --git a/senlin/tests/unit/profiles/__init__.py b/senlin/tests/unit/profiles/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/senlin/tests/unit/profiles/test_container_docker.py 
b/senlin/tests/unit/profiles/test_container_docker.py deleted file mode 100644 index e75ad60d4..000000000 --- a/senlin/tests/unit/profiles/test_container_docker.py +++ /dev/null @@ -1,817 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from unittest import mock - - -from senlin.common import context -from senlin.common import exception as exc -from senlin.common.i18n import _ -from senlin.db.sqlalchemy import api as db_api -from senlin.engine import cluster -from senlin.engine import node -from senlin.objects import cluster as co -from senlin.objects import node as no -from senlin.profiles import base as pb -from senlin.profiles.container import docker as dp -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestContainerDockerProfile(base.SenlinTestCase): - - def setUp(self): - super(TestContainerDockerProfile, self).setUp() - - self.context = utils.dummy_context() - self.spec = { - 'type': 'container.dockerinc.docker', - 'version': '1.0', - 'properties': { - 'context': { - 'region_name': 'RegionOne' - }, - 'name': 'docker_container', - 'image': 'hello-world', - 'command': '/bin/sleep 30', - 'port': 2375, - 'host_node': 'fake_node', - } - } - - def test_init(self): - profile = dp.DockerProfile('t', self.spec) - self.assertIsNone(profile._dockerclient) - self.assertIsNone(profile.container_id) - self.assertIsNone(profile.host) - - @mock.patch.object(dp.DockerProfile, 'do_validate') - @mock.patch.object(db_api, 'node_add_dependents') - @mock.patch.object(db_api, 'cluster_add_dependents') - def test_create_with_host_node(self, mock_cadd, mock_nadd, mock_validate): - mock_validate.return_value = None - - profile = dp.DockerProfile.create( - self.context, 'fake_name', self.spec) - - self.assertIsNotNone(profile) - mock_nadd.assert_called_once_with(self.context, 'fake_node', - profile.id, 'profile') - self.assertEqual(0, mock_cadd.call_count) - - @mock.patch.object(dp.DockerProfile, 'do_validate') - @mock.patch.object(db_api, 'node_add_dependents') - @mock.patch.object(db_api, 'cluster_add_dependents') - def test_create_with_host_cluster(self, mock_cadd, mock_nadd, - mock_validate): - mock_validate.return_value = None - spec = copy.deepcopy(self.spec) - del spec['properties']['host_node'] - spec['properties']['host_cluster'] = 'fake_cluster' - - profile = dp.DockerProfile.create( - self.context, 'fake_name', spec) - - self.assertIsNotNone(profile) - mock_cadd.assert_called_once_with(self.context, 'fake_cluster', - profile.id) - self.assertEqual(0, mock_nadd.call_count) - - @mock.patch.object(pb.Profile, 'delete') - @mock.patch.object(pb.Profile, 'load') - @mock.patch.object(db_api, 'node_remove_dependents') - @mock.patch.object(db_api, 'cluster_remove_dependents') - def test_delete_with_host_node(self, mock_cdel, mock_ndel, mock_load, - mock_delete): - profile = dp.DockerProfile('t', self.spec) - mock_load.return_value = profile - - res = dp.DockerProfile.delete(self.context, 'FAKE_ID') - - self.assertIsNone(res) - 
mock_load.assert_called_once_with(self.context, profile_id='FAKE_ID') - mock_ndel.assert_called_once_with(self.context, 'fake_node', - 'FAKE_ID', 'profile') - self.assertEqual(0, mock_cdel.call_count) - mock_delete.assert_called_once_with(self.context, 'FAKE_ID') - - @mock.patch.object(pb.Profile, 'delete') - @mock.patch.object(pb.Profile, 'load') - @mock.patch.object(db_api, 'node_remove_dependents') - @mock.patch.object(db_api, 'cluster_remove_dependents') - def test_delete_with_host_cluster(self, mock_cdel, mock_ndel, mock_load, - mock_delete): - spec = copy.deepcopy(self.spec) - del spec['properties']['host_node'] - spec['properties']['host_cluster'] = 'fake_cluster' - profile = dp.DockerProfile('fake_name', spec) - mock_load.return_value = profile - - res = dp.DockerProfile.delete(self.context, 'FAKE_ID') - - self.assertIsNone(res) - mock_load.assert_called_once_with(self.context, profile_id='FAKE_ID') - mock_cdel.assert_called_once_with(self.context, 'fake_cluster', - 'FAKE_ID') - self.assertEqual(0, mock_ndel.call_count) - - @mock.patch('senlin.drivers.container.docker_v1.DockerClient') - @mock.patch.object(dp.DockerProfile, '_get_host_ip') - @mock.patch.object(dp.DockerProfile, '_get_host') - @mock.patch.object(context, 'get_admin_context') - def test_docker_client(self, mock_ctx, mock_host, mock_ip, mock_client): - ctx = mock.Mock() - mock_ctx.return_value = ctx - profile = mock.Mock(type_name='os.nova.server') - host = mock.Mock(rt={'profile': profile}, physical_id='server1') - mock_host.return_value = host - fake_ip = '1.2.3.4' - mock_ip.return_value = fake_ip - dockerclient = mock.Mock() - mock_client.return_value = dockerclient - profile = dp.DockerProfile('container', self.spec) - obj = mock.Mock() - client = profile.docker(obj) - self.assertEqual(dockerclient, client) - mock_host.assert_called_once_with(ctx, 'fake_node', None) - mock_ip.assert_called_once_with(obj, 'server1', 'os.nova.server') - url = 'tcp://1.2.3.4:2375' - mock_client.assert_called_once_with(url) - - @mock.patch.object(dp.DockerProfile, '_get_host') - def test_docker_client_wrong_host_type(self, mock_get): - profile = mock.Mock(type_name='wrong_type') - host = mock.Mock(rt={'profile': profile}, physical_id='server1') - mock_get.return_value = host - obj = mock.Mock() - profile = dp.DockerProfile('container', self.spec) - ex = self.assertRaises(exc.InternalError, - profile.docker, obj) - msg = _('Type of host node (wrong_type) is not supported') - - self.assertEqual(msg, ex.message) - - @mock.patch.object(dp.DockerProfile, '_get_host_ip') - @mock.patch.object(dp.DockerProfile, '_get_host') - def test_docker_client_get_host_ip_failed(self, mock_host, mock_ip): - profile = mock.Mock(type_name='os.nova.server') - host = mock.Mock(rt={'profile': profile}, physical_id='server1') - mock_host.return_value = host - mock_ip.return_value = None - obj = mock.Mock() - profile = dp.DockerProfile('container', self.spec) - ex = self.assertRaises(exc.InternalError, - profile.docker, obj) - msg = _('Unable to determine the IP address of host node') - - self.assertEqual(msg, ex.message) - - @mock.patch.object(node.Node, 'load') - def test_get_host_node_found_by_node(self, mock_load): - node = mock.Mock() - mock_load.return_value = node - ctx = mock.Mock() - profile = dp.DockerProfile('container', self.spec) - - res = profile._get_host(ctx, 'host_node', None) - - self.assertEqual(node, res) - mock_load.assert_called_once_with(ctx, node_id='host_node') - - @mock.patch.object(dp.DockerProfile, '_get_random_node') - def 
test_get_host_node_found_by_cluster(self, mock_get): - node = mock.Mock() - mock_get.return_value = node - ctx = mock.Mock() - profile = dp.DockerProfile('container', self.spec) - - res = profile._get_host(ctx, None, 'host_cluster') - - self.assertEqual(node, res) - mock_get.assert_called_once_with(ctx, 'host_cluster') - - @mock.patch.object(node.Node, 'load') - def test_get_host_node_not_found(self, mock_load): - mock_load.side_effect = exc.ResourceNotFound(type='node', - id='fake_node') - profile = dp.DockerProfile('container', self.spec) - ctx = mock.Mock() - - ex = self.assertRaises(exc.InternalError, - profile._get_host, - ctx, 'fake_node', None) - - msg = _("The host node 'fake_node' could not be found.") - self.assertEqual(msg, ex.message) - - @mock.patch.object(node.Node, 'load') - @mock.patch.object(no.Node, 'get_all_by_cluster') - @mock.patch.object(cluster.Cluster, 'load') - def test_get_random_node(self, mock_cluster, mock_nodes, mock_load): - cluster = mock.Mock() - mock_cluster.return_value = cluster - node1 = mock.Mock() - node2 = mock.Mock() - mock_nodes.return_value = [node1, node2] - profile = dp.DockerProfile('container', self.spec) - ctx = mock.Mock() - x_node = mock.Mock() - mock_load.return_value = x_node - - res = profile._get_random_node(ctx, 'host_cluster') - - self.assertEqual(x_node, res) - mock_cluster.assert_called_once_with(ctx, cluster_id='host_cluster') - mock_nodes.assert_called_once_with(ctx, cluster_id='host_cluster', - filters={'status': 'ACTIVE'}) - mock_load.assert_called_once_with(ctx, db_node=mock.ANY) - n = mock_load.call_args[1]['db_node'] - self.assertIn(n, [node1, node2]) - - @mock.patch.object(cluster.Cluster, 'load') - def test_get_random_node_cluster_not_found(self, mock_load): - mock_load.side_effect = exc.ResourceNotFound(type='cluster', - id='host_cluster') - ctx = mock.Mock() - profile = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.InternalError, - profile._get_random_node, - ctx, 'host_cluster') - - msg = _("The host cluster 'host_cluster' could not be found.") - self.assertEqual(msg, ex.message) - - @mock.patch.object(no.Node, 'get_all_by_cluster') - @mock.patch.object(cluster.Cluster, 'load') - def test_get_random_node_empty_cluster(self, mock_cluster, mock_nodes): - cluster = mock.Mock() - mock_cluster.return_value = cluster - mock_nodes.return_value = [] - profile = dp.DockerProfile('container', self.spec) - ctx = mock.Mock() - - ex = self.assertRaises(exc.InternalError, - profile._get_random_node, - ctx, 'host_cluster') - - msg = _('The cluster (host_cluster) contains no active nodes') - self.assertEqual(msg, ex.message) - mock_nodes.assert_called_once_with(ctx, cluster_id='host_cluster', - filters={'status': 'ACTIVE'}) - - def test_get_host_ip_nova_server(self): - addresses = { - 'private': [{'version': 4, 'OS-EXT-IPS:type': 'fixed', - 'addr': '1.2.3.4'}] - } - server = mock.Mock(addresses=addresses) - cc = mock.Mock() - cc.server_get.return_value = server - profile = dp.DockerProfile('container', self.spec) - profile._computeclient = cc - obj = mock.Mock() - host_ip = profile._get_host_ip(obj, 'fake_node', 'os.nova.server') - self.assertEqual('1.2.3.4', host_ip) - cc.server_get.assert_called_once_with('fake_node') - - def test_get_host_ip_heat_stack(self): - oc = mock.Mock() - stack = mock.Mock( - outputs=[{'output_key': 'fixed_ip', 'output_value': '1.2.3.4'}] - ) - oc.stack_get.return_value = stack - profile = dp.DockerProfile('container', self.spec) - profile._orchestrationclient = oc - obj = 
mock.Mock() - - host_ip = profile._get_host_ip(obj, 'fake_node', 'os.heat.stack') - - self.assertEqual('1.2.3.4', host_ip) - oc.stack_get.assert_called_once_with('fake_node') - - def test_get_host_ip_heat_stack_no_outputs(self): - oc = mock.Mock() - stack = mock.Mock(outputs=None) - oc.stack_get.return_value = stack - profile = dp.DockerProfile('container', self.spec) - profile._orchestrationclient = oc - obj = mock.Mock() - - ex = self.assertRaises(exc.InternalError, - profile._get_host_ip, - obj, 'fake_node', 'os.heat.stack') - - msg = _("Output 'fixed_ip' is missing from the provided stack node") - - self.assertEqual(msg, ex.message) - - def test_do_validate_with_cluster_and_node(self): - spec = copy.deepcopy(self.spec) - spec['properties']['host_cluster'] = 'fake_cluster' - obj = mock.Mock() - profile = dp.DockerProfile('container', spec) - - ex = self.assertRaises(exc.InvalidSpec, - profile.do_validate, obj) - - self.assertEqual("Either 'host_cluster' or 'host_node' must be " - "specified, but not both.", str(ex)) - - def test_do_validate_with_neither_cluster_or_node(self): - spec = copy.deepcopy(self.spec) - del spec['properties']['host_node'] - obj = mock.Mock() - profile = dp.DockerProfile('container', spec) - - ex = self.assertRaises(exc.InvalidSpec, - profile.do_validate, obj) - - self.assertEqual("Either 'host_cluster' or 'host_node' must be " - "specified.", str(ex)) - - @mock.patch.object(no.Node, 'find') - def test_do_validate_with_node(self, mock_find): - obj = mock.Mock() - profile = dp.DockerProfile('container', self.spec) - mock_find.return_value = mock.Mock() - - res = profile.do_validate(obj) - - self.assertIsNone(res) - mock_find.assert_called_once_with(profile.context, 'fake_node') - - @mock.patch.object(no.Node, 'find') - def test_do_validate_node_not_found(self, mock_find): - obj = mock.Mock() - profile = dp.DockerProfile('container', self.spec) - mock_find.side_effect = exc.ResourceNotFound(type='node', - id='fake_node') - - ex = self.assertRaises(exc.InvalidSpec, - profile.do_validate, obj) - - self.assertEqual("The specified host_node 'fake_node' could not be " - "found or is not unique.", str(ex)) - mock_find.assert_called_once_with(profile.context, 'fake_node') - - @mock.patch.object(co.Cluster, 'find') - def test_do_validate_with_cluster(self, mock_find): - spec = copy.deepcopy(self.spec) - obj = mock.Mock() - del spec['properties']['host_node'] - spec['properties']['host_cluster'] = 'fake_cluster' - profile = dp.DockerProfile('container', spec) - mock_find.return_value = mock.Mock() - - res = profile.do_validate(obj) - - self.assertIsNone(res) - mock_find.assert_called_once_with(profile.context, 'fake_cluster') - - @mock.patch.object(co.Cluster, 'find') - def test_do_validate_cluster_not_found(self, mock_find): - spec = copy.deepcopy(self.spec) - del spec['properties']['host_node'] - spec['properties']['host_cluster'] = 'fake_cluster' - obj = mock.Mock() - mock_find.side_effect = exc.ResourceNotFound(type='node', - id='fake_cluster') - profile = dp.DockerProfile('container', spec) - - ex = self.assertRaises(exc.InvalidSpec, - profile.do_validate, obj) - - self.assertEqual("The specified host_cluster 'fake_cluster' could " - "not be found or is not unique.", str(ex)) - mock_find.assert_called_once_with(profile.context, 'fake_cluster') - - @mock.patch.object(db_api, 'node_add_dependents') - @mock.patch.object(context, 'get_service_context') - @mock.patch.object(dp.DockerProfile, 'docker') - def test_do_create(self, mock_docker, mock_ctx, mock_add): - ctx = 
mock.Mock() - mock_ctx.return_value = ctx - dockerclient = mock.Mock() - mock_docker.return_value = dockerclient - container = {'Id': 'd' * 64} - dockerclient.container_create.return_value = container - container_id = 'd' * 36 - profile = dp.DockerProfile('container', self.spec) - host = mock.Mock(id='node_id') - profile.host = host - profile.cluster = mock.Mock(id='cluster_id') - profile.id = 'profile_id' - obj = mock.Mock(id='fake_con_id') - - ret_container_id = profile.do_create(obj) - - mock_add.assert_called_once_with(ctx, 'node_id', 'fake_con_id') - self.assertEqual(container_id, ret_container_id) - params = { - 'image': 'hello-world', - 'name': 'docker_container', - 'command': '/bin/sleep 30', - } - dockerclient.container_create.assert_called_once_with(**params) - - @mock.patch.object(context, 'get_service_context') - @mock.patch.object(dp.DockerProfile, 'docker') - def test_do_create_failed(self, mock_docker, mock_ctx): - mock_ctx.return_value = mock.Mock() - mock_docker.side_effect = exc.InternalError - - profile = dp.DockerProfile('container', self.spec) - - obj = mock.Mock() - self.assertRaises(exc.EResourceCreation, - profile.do_create, obj) - - mock_ctx.assert_called_once_with(project=obj.project, user=obj.user) - - @mock.patch.object(context, 'get_admin_context') - @mock.patch.object(db_api, 'node_remove_dependents') - @mock.patch.object(dp.DockerProfile, 'docker') - def test_do_delete(self, mock_docker, mock_rem, mock_ctx): - obj = mock.Mock(id='container1', physical_id='FAKE_PHYID') - dockerclient = mock.Mock() - ctx = mock.Mock() - mock_ctx.return_value = ctx - mock_docker.return_value = dockerclient - host = mock.Mock(dependents={}) - host.id = 'node_id' - profile = dp.DockerProfile('container', self.spec) - profile.host = host - profile.id = 'profile_id' - - res = profile.do_delete(obj) - - self.assertIsNone(res) - mock_rem.assert_called_once_with(ctx, 'node_id', 'container1') - dockerclient.container_delete.assert_any_call('FAKE_PHYID') - - def test_do_delete_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = dp.DockerProfile('container', self.spec) - self.assertIsNone(profile.do_delete(obj)) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_do_delete_failed(self, mock_docker): - obj = mock.Mock(physical_id='FAKE_ID') - mock_docker.side_effect = exc.InternalError - - profile = dp.DockerProfile('container', self.spec) - - self.assertRaises(exc.EResourceDeletion, - profile.do_delete, obj) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_update_name(self, mock_docker): - x_docker = mock.Mock() - x_docker = mock_docker.return_value - obj = mock.Mock(physical_id='FAKE_ID') - - docker = dp.DockerProfile('container', self.spec) - res = docker._update_name(obj, 'NEW_NAME') - - self.assertIsNone(res) - x_docker.rename.assert_called_once_with('FAKE_ID', 'NEW_NAME') - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_update_name_docker_failure(self, mock_docker): - x_docker = mock.Mock() - x_docker = mock_docker.return_value - x_docker.rename.side_effect = exc.InternalError(message='BOOM') - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceUpdate, - docker._update_name, - obj, 'NEW_NAME') - - self.assertEqual("Failed in updating container 'FAKE_ID': BOOM.", - str(ex)) - x_docker.rename.assert_called_once_with('FAKE_ID', 'NEW_NAME') - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_do_update(self, mock_docker): - obj = mock.Mock(physical_id='FAKE_ID') -
docker = dp.DockerProfile('container', self.spec) - - new_spec = { - 'type': 'container.dockerinc.docker', - 'version': '1.0', - 'properties': { - 'context': { - 'region_name': 'RegionOne' - }, - 'name': 'new_name', - 'image': 'hello-world', - 'command': '/bin/sleep 30', - 'port': 2375, - 'host_node': 'fake_node', - } - } - new_profile = dp.DockerProfile('u', new_spec) - res = docker.do_update(obj, new_profile) - - self.assertTrue(res) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_do_update_no_new_profile(self, mock_docker): - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - params = {} - res = docker.do_update(obj, params) - - self.assertFalse(res) - - def test_do_update_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = dp.DockerProfile('container', self.spec) - self.assertFalse(profile.do_update(obj)) - - def test_check_container_name(self): - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - new_spec = { - 'type': 'container.dockerinc.docker', - 'version': '1.0', - 'properties': { - 'context': { - 'region_name': 'RegionOne' - }, - 'name': 'new_name', - 'image': 'hello-world', - 'command': '/bin/sleep 30', - 'port': 2375, - 'host_node': 'fake_node', - } - } - new_profile = dp.DockerProfile('u', new_spec) - res, new_name = docker._check_container_name(obj, new_profile) - - self.assertTrue(res) - - def test_check_container_same_name(self): - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - new_spec = { - 'type': 'container.dockerinc.docker', - 'version': '1.0', - 'properties': { - 'context': { - 'region_name': 'RegionOne' - }, - 'name': 'docker_container', - 'image': 'hello-world', - 'command': '/bin/sleep 30', - 'port': 2375, - 'host_node': 'fake_node', - } - } - new_profile = dp.DockerProfile('u', new_spec) - res, new_name = docker._check_container_name(obj, new_profile) - - self.assertFalse(res) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_reboot(self, mock_docker): - x_docker = mock.Mock() - x_docker = mock_docker.return_value - obj = mock.Mock(physical_id='FAKE_ID') - - docker = dp.DockerProfile('container', self.spec) - - res = docker.handle_reboot(obj) - - self.assertIsNone(res) - mock_docker.assert_called_once_with(obj) - x_docker.restart.assert_called_once_with('FAKE_ID') - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_reboot_with_timeout(self, mock_docker): - x_docker = mock.Mock() - x_docker = mock_docker.return_value - obj = mock.Mock(physical_id='FAKE_ID') - - docker = dp.DockerProfile('container', self.spec) - - res = docker.handle_reboot(obj, timeout=200) - - self.assertIsNone(res) - mock_docker.assert_called_once_with(obj) - x_docker.restart.assert_called_once_with('FAKE_ID', timeout=200) - - def test_handle_reboot_no_physical_id(self): - obj = mock.Mock(physical_id=None) - docker = dp.DockerProfile('container', self.spec) - - res = docker.handle_reboot(obj) - - self.assertIsNone(res) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_reboot_driver_failure(self, mock_docker): - mock_docker.side_effect = exc.InternalError(message="Boom") - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceOperation, - docker.handle_reboot, - obj) - - self.assertEqual("Failed in rebooting container 'FAKE_ID': " - "Boom.", str(ex)) - - @mock.patch.object(dp.DockerProfile, 'docker') 
- def test_handle_reboot_docker_failure(self, mock_docker): - x_docker = mock.Mock() - mock_docker.return_value = x_docker - x_docker.restart.side_effect = exc.InternalError(message="Boom") - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceOperation, - docker.handle_reboot, - obj) - - self.assertEqual("Failed in rebooting container 'FAKE_ID': " - "Boom.", str(ex)) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_pause(self, mock_docker): - x_docker = mock.Mock() - x_docker = mock_docker.return_value - obj = mock.Mock(physical_id='FAKE_ID') - - docker = dp.DockerProfile('container', self.spec) - - res = docker.handle_pause(obj) - - self.assertIsNone(res) - mock_docker.assert_called_once_with(obj) - x_docker.pause.assert_called_once_with('FAKE_ID') - - def test_handle_pause_no_physical_id(self): - obj = mock.Mock(physical_id=None) - docker = dp.DockerProfile('container', self.spec) - - res = docker.handle_pause(obj) - - self.assertIsNone(res) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_pause_driver_failure(self, mock_docker): - mock_docker.side_effect = exc.InternalError(message="Boom") - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceOperation, - docker.handle_pause, - obj) - - self.assertEqual("Failed in pausing container 'FAKE_ID': " - "Boom.", str(ex)) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_pause_docker_failure(self, mock_docker): - x_docker = mock.Mock() - mock_docker.return_value = x_docker - x_docker.pause.side_effect = exc.InternalError(message="Boom") - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceOperation, - docker.handle_pause, - obj) - - self.assertEqual("Failed in pausing container 'FAKE_ID': " - "Boom.", str(ex)) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_unpause(self, mock_docker): - x_docker = mock.Mock() - x_docker = mock_docker.return_value - obj = mock.Mock(physical_id='FAKE_ID') - - docker = dp.DockerProfile('container', self.spec) - - res = docker.handle_unpause(obj) - - self.assertIsNone(res) - mock_docker.assert_called_once_with(obj) - x_docker.unpause.assert_called_once_with('FAKE_ID') - - def test_handle_unpause_no_physical_id(self): - obj = mock.Mock(physical_id=None) - docker = dp.DockerProfile('container', self.spec) - - res = docker.handle_unpause(obj) - - self.assertIsNone(res) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_unpause_driver_failure(self, mock_docker): - mock_docker.side_effect = exc.InternalError(message="Boom") - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceOperation, - docker.handle_unpause, - obj) - - self.assertEqual("Failed in unpausing container 'FAKE_ID': " - "Boom.", str(ex)) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_unpause_docker_failure(self, mock_docker): - x_docker = mock.Mock() - mock_docker.return_value = x_docker - x_docker.unpause.side_effect = exc.InternalError(message="Boom") - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceOperation, - docker.handle_unpause, - obj) - - self.assertEqual("Failed in unpausing container 'FAKE_ID': " - "Boom.", str(ex)) - - 
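The reboot, pause, and unpause cases above all repeat one failure-path convention that is easy to miss in the repetition: setting side_effect on the patched DockerProfile.docker simulates the driver failing to build, while setting side_effect on a method of its return_value simulates the docker client failing after construction, and both are expected to surface as EResourceOperation. A condensed sketch of the two variants, assuming the same imports (dp, exc, mock) and the self.spec fixture used throughout this deleted module:

    @mock.patch.object(dp.DockerProfile, 'docker')
    def test_op_driver_failure(self, mock_docker):
        # Constructing the docker driver itself fails.
        mock_docker.side_effect = exc.InternalError(message='Boom')
        obj = mock.Mock(physical_id='FAKE_ID')
        profile = dp.DockerProfile('container', self.spec)

        self.assertRaises(exc.EResourceOperation, profile.handle_pause, obj)

    @mock.patch.object(dp.DockerProfile, 'docker')
    def test_op_docker_failure(self, mock_docker):
        # The driver is built, but the client call then fails.
        mock_docker.return_value.pause.side_effect = exc.InternalError(
            message='Boom')
        obj = mock.Mock(physical_id='FAKE_ID')
        profile = dp.DockerProfile('container', self.spec)

        self.assertRaises(exc.EResourceOperation, profile.handle_pause, obj)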
@mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_stop(self, mock_docker): - x_docker = mock.Mock() - x_docker = mock_docker.return_value - obj = mock.Mock(physical_id='FAKE_ID') - - docker = dp.DockerProfile('container', self.spec) - params = {'timeout': None} - res = docker.handle_stop(obj, **params) - - self.assertIsNone(res) - mock_docker.assert_called_once_with(obj) - x_docker.stop.assert_called_once_with('FAKE_ID', **params) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_stop_with_timeout(self, mock_docker): - x_docker = mock.Mock() - x_docker = mock_docker.return_value - obj = mock.Mock(physical_id='FAKE_ID') - - docker = dp.DockerProfile('container', self.spec) - params = {'timeout': 200} - res = docker.handle_stop(obj, **params) - - self.assertIsNone(res) - mock_docker.assert_called_once_with(obj) - x_docker.stop.assert_called_once_with('FAKE_ID', **params) - - def test_handle_stop_no_physical_id(self): - obj = mock.Mock(physical_id=None) - docker = dp.DockerProfile('container', self.spec) - - res = docker.handle_stop(obj) - - self.assertIsNone(res) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_stop_driver_failure(self, mock_docker): - mock_docker.side_effect = exc.InternalError(message="Boom") - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceOperation, - docker.handle_stop, - obj) - - self.assertEqual("Failed in stop container 'FAKE_ID': " - "Boom.", str(ex)) - - @mock.patch.object(dp.DockerProfile, 'docker') - def test_handle_stop_docker_failure(self, mock_docker): - x_docker = mock.Mock() - mock_docker.return_value = x_docker - x_docker.stop.side_effect = exc.InternalError(message="Boom") - obj = mock.Mock(physical_id='FAKE_ID') - docker = dp.DockerProfile('container', self.spec) - - ex = self.assertRaises(exc.EResourceOperation, - docker.handle_stop, - obj) - - self.assertEqual("Failed in stop container 'FAKE_ID': " - "Boom.", str(ex)) diff --git a/senlin/tests/unit/profiles/test_heat_stack.py b/senlin/tests/unit/profiles/test_heat_stack.py deleted file mode 100644 index bd1be1cd8..000000000 --- a/senlin/tests/unit/profiles/test_heat_stack.py +++ /dev/null @@ -1,1008 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
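Several of the deleted tests, here and in the nova-server module further below, stack multiple mock.patch.object decorators (test_get_random_node and test_do_delete above are typical), and reading them correctly depends on remembering that decorators apply bottom-up: the innermost patch is delivered as the first mock argument. A self-contained illustration; the Target class and all names in it are invented for the example:

    import unittest
    from unittest import mock


    class Target:
        @staticmethod
        def first():
            return 'first'

        @staticmethod
        def second():
            return 'second'


    class TestDecoratorOrder(unittest.TestCase):

        # The decorator nearest the method ('first') is applied first,
        # so its mock arrives as the first positional argument.
        @mock.patch.object(Target, 'second')
        @mock.patch.object(Target, 'first')
        def test_order(self, mock_first, mock_second):
            Target.first()
            Target.second()
            mock_first.assert_called_once_with()
            mock_second.assert_called_once_with()


    if __name__ == '__main__':
        unittest.main()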
- -import copy -from unittest import mock - - -from senlin.common import exception as exc -from senlin.profiles.os.heat import stack -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestHeatStackProfile(base.SenlinTestCase): - - def setUp(self): - super(TestHeatStackProfile, self).setUp() - self.context = utils.dummy_context() - self.spec = { - 'type': 'os.heat.stack', - 'version': '1.0', - 'properties': { - 'template': {"Template": "data"}, - 'template_url': '/test_uri', - 'context': {}, - 'parameters': {'foo': 'bar'}, - 'files': {}, - 'timeout': 60, - 'disable_rollback': True, - 'environment': {} - } - } - - def test_stack_init(self): - profile = stack.StackProfile('t', self.spec) - self.assertIsNone(profile.stack_id) - - def test_do_validate(self): - oc = mock.Mock() - profile = stack.StackProfile('t', self.spec) - profile._orchestrationclient = oc - node_obj = mock.Mock(user='fake_user', project='fake_project') - - res = profile.do_validate(node_obj) - - props = self.spec['properties'] - call_args = { - 'stack_name': mock.ANY, - 'template': props['template'], - 'template_url': props['template_url'], - 'parameters': props['parameters'], - 'files': props['files'], - 'environment': props['environment'], - 'preview': True, - } - self.assertTrue(res) - oc.stack_create.assert_called_once_with(**call_args) - - def test_do_validate_fails(self): - oc = mock.Mock() - profile = stack.StackProfile('t', self.spec) - profile._orchestrationclient = oc - err = exc.InternalError(code=400, message='Boom') - oc.stack_create = mock.Mock(side_effect=err) - node_obj = mock.Mock() - node_obj.name = 'stack_node' - - ex = self.assertRaises(exc.InvalidSpec, - profile.do_validate, node_obj) - - props = self.spec['properties'] - call_args = { - 'stack_name': mock.ANY, - 'template': props['template'], - 'template_url': props['template_url'], - 'parameters': props['parameters'], - 'files': props['files'], - 'environment': props['environment'], - 'preview': True, - } - oc.stack_create.assert_called_once_with(**call_args) - self.assertEqual('Failed in validating template: Boom', - str(ex)) - - def test_do_create(self): - oc = mock.Mock() - profile = stack.StackProfile('t', self.spec) - profile._orchestrationclient = oc - node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - node.name = 'test_node' - fake_stack = mock.Mock(id='FAKE_ID') - oc.stack_create = mock.Mock(return_value=fake_stack) - - # do it - res = profile.do_create(node) - - # assertions - kwargs = { - 'stack_name': mock.ANY, - 'template': self.spec['properties']['template'], - 'template_url': self.spec['properties']['template_url'], - 'timeout_mins': self.spec['properties']['timeout'], - 'disable_rollback': self.spec['properties']['disable_rollback'], - 'parameters': self.spec['properties']['parameters'], - 'files': self.spec['properties']['files'], - 'environment': self.spec['properties']['environment'], - 'tags': ",".join(['cluster_node_id=NODE_ID', - 'cluster_id=CLUSTER_ID', - 'cluster_node_index=123']) - } - self.assertEqual('FAKE_ID', res) - oc.stack_create.assert_called_once_with(**kwargs) - oc.wait_for_stack.assert_called_once_with('FAKE_ID', 'CREATE_COMPLETE', - timeout=3600) - - def test_do_create_with_template_url(self): - spec = { - 'type': 'os.heat.stack', - 'version': '1.0', - 'properties': { - 'template': {}, - 'template_url': '/test_uri', - 'context': {}, - 'parameters': {'foo': 'bar'}, - 'files': {}, - 'timeout': 60, - 'disable_rollback': True, - 'environment': {} - } - } - 
oc = mock.Mock() - profile = stack.StackProfile('t', spec) - profile._orchestrationclient = oc - node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - node.name = 'test_node' - fake_stack = mock.Mock(id='FAKE_ID') - oc.stack_create = mock.Mock(return_value=fake_stack) - - # do it - res = profile.do_create(node) - - # assertions - kwargs = { - 'stack_name': mock.ANY, - 'template': spec['properties']['template'], - 'template_url': spec['properties']['template_url'], - 'timeout_mins': spec['properties']['timeout'], - 'disable_rollback': spec['properties']['disable_rollback'], - 'parameters': spec['properties']['parameters'], - 'files': spec['properties']['files'], - 'environment': spec['properties']['environment'], - 'tags': ",".join(['cluster_node_id=NODE_ID', - 'cluster_id=CLUSTER_ID', - 'cluster_node_index=123']) - } - self.assertEqual('FAKE_ID', res) - oc.stack_create.assert_called_once_with(**kwargs) - oc.wait_for_stack.assert_called_once_with('FAKE_ID', 'CREATE_COMPLETE', - timeout=3600) - - def test_do_create_default_timeout(self): - spec = copy.deepcopy(self.spec) - del spec['properties']['timeout'] - profile = stack.StackProfile('t', spec) - oc = mock.Mock() - profile._orchestrationclient = oc - node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - node.name = 'test_node' - fake_stack = mock.Mock(id='FAKE_ID') - - oc.stack_create = mock.Mock(return_value=fake_stack) - oc.wait_for_stack = mock.Mock() - - # do it - res = profile.do_create(node) - - # assertions - self.assertEqual('FAKE_ID', res) - kwargs = { - 'stack_name': mock.ANY, - 'template': self.spec['properties']['template'], - 'template_url': self.spec['properties']['template_url'], - 'timeout_mins': None, - 'disable_rollback': self.spec['properties']['disable_rollback'], - 'parameters': self.spec['properties']['parameters'], - 'files': self.spec['properties']['files'], - 'environment': self.spec['properties']['environment'], - 'tags': ",".join(['cluster_node_id=NODE_ID', - 'cluster_id=CLUSTER_ID', - 'cluster_node_index=123']) - } - oc.stack_create.assert_called_once_with(**kwargs) - oc.wait_for_stack.assert_called_once_with('FAKE_ID', 'CREATE_COMPLETE', - timeout=None) - - def test_do_create_failed_create(self): - oc = mock.Mock() - profile = stack.StackProfile('t', self.spec) - - node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - node.name = 'test_node' - err = exc.InternalError(code=400, message='Too Bad') - oc.stack_create = mock.Mock(side_effect=err) - profile._orchestrationclient = oc - - # do it - ex = self.assertRaises(exc.EResourceCreation, - profile.do_create, - node) - - # assertions - self.assertEqual('Failed in creating stack: Too Bad.', - str(ex)) - call_args = { - 'stack_name': mock.ANY, - 'template': self.spec['properties']['template'], - 'template_url': self.spec['properties']['template_url'], - 'timeout_mins': self.spec['properties']['timeout'], - 'disable_rollback': self.spec['properties']['disable_rollback'], - 'parameters': self.spec['properties']['parameters'], - 'files': self.spec['properties']['files'], - 'environment': self.spec['properties']['environment'], - 'tags': ",".join(['cluster_node_id=NODE_ID', - 'cluster_id=CLUSTER_ID', - 'cluster_node_index=123']) - } - oc.stack_create.assert_called_once_with(**call_args) - self.assertEqual(0, oc.wait_for_stack.call_count) - - def test_do_create_failed_wait(self): - spec = copy.deepcopy(self.spec) - del spec['properties']['timeout'] - profile = stack.StackProfile('t', spec) - oc = mock.Mock() - node = 
mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - node.name = 'test_node' - fake_stack = mock.Mock(id='FAKE_ID') - - oc.stack_create = mock.Mock(return_value=fake_stack) - err = exc.InternalError(code=400, message='Timeout') - oc.wait_for_stack = mock.Mock(side_effect=err) - profile._orchestrationclient = oc - - # do it - ex = self.assertRaises(exc.EResourceCreation, - profile.do_create, - node) - - # assertions - self.assertEqual('Failed in creating stack: Timeout.', - str(ex)) - kwargs = { - 'stack_name': mock.ANY, - 'template': self.spec['properties']['template'], - 'template_url': self.spec['properties']['template_url'], - 'timeout_mins': None, - 'disable_rollback': self.spec['properties']['disable_rollback'], - 'parameters': self.spec['properties']['parameters'], - 'files': self.spec['properties']['files'], - 'environment': self.spec['properties']['environment'], - 'tags': ",".join(['cluster_node_id=NODE_ID', - 'cluster_id=CLUSTER_ID', - 'cluster_node_index=123']) - } - oc.stack_create.assert_called_once_with(**kwargs) - oc.wait_for_stack.assert_called_once_with('FAKE_ID', 'CREATE_COMPLETE', - timeout=None) - - def test_do_delete(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - test_stack = mock.Mock(physical_id='FAKE_ID') - - # do it - res = profile.do_delete(test_stack) - - # assertions - self.assertTrue(res) - oc.stack_delete.assert_called_once_with('FAKE_ID', True) - oc.wait_for_stack_delete.assert_called_once_with('FAKE_ID') - - def test_do_delete_no_physical_id(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - test_stack = mock.Mock(physical_id=None) - profile._orchestrationclient = oc - - # do it - res = profile.do_delete(test_stack, ignore_missing=False) - - # assertions - self.assertTrue(res) - self.assertFalse(oc.stack_delete.called) - self.assertFalse(oc.wait_for_stack_delete.called) - - def test_do_delete_ignore_missing(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - test_stack = mock.Mock(physical_id='FAKE_ID') - profile._orchestrationclient = oc - - # do it - res = profile.do_delete(test_stack, ignore_missing=False) - - # assertions - self.assertTrue(res) - oc.stack_delete.assert_called_once_with('FAKE_ID', False) - oc.wait_for_stack_delete.assert_called_once_with('FAKE_ID') - - def test_do_delete_failed_deletion(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - err = exc.InternalError(code=400, message='Boom') - oc.stack_delete = mock.Mock(side_effect=err) - test_stack = mock.Mock(physical_id='FAKE_ID') - - # do it - ex = self.assertRaises(exc.EResourceDeletion, - profile.do_delete, - test_stack) - - # assertions - self.assertEqual("Failed in deleting stack 'FAKE_ID': Boom.", - str(ex)) - oc.stack_delete.assert_called_once_with('FAKE_ID', True) - self.assertEqual(0, oc.wait_for_stack_delete.call_count) - - def test_do_delete_failed_timeout(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - test_stack = mock.Mock(physical_id='FAKE_ID') - profile._orchestrationclient = oc - err = exc.InternalError(code=400, message='Boom') - oc.wait_for_stack_delete = mock.Mock(side_effect=err) - - # do it - ex = self.assertRaises(exc.EResourceDeletion, - profile.do_delete, test_stack) - - # assertions - self.assertEqual("Failed in deleting stack 'FAKE_ID': Boom.", - str(ex)) - oc.stack_delete.assert_called_once_with('FAKE_ID', True) - 
oc.wait_for_stack_delete.assert_called_once_with('FAKE_ID') - - def test_do_update(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - test_stack = mock.Mock(physical_id='FAKE_ID') - new_spec = { - 'type': 'os.heat.stack', - 'version': '1.0', - 'properties': { - 'template': {"Template": "data update"}, - 'context': {}, - 'parameters': {'new': 'params'}, - 'files': {'file1': 'new_content'}, - 'timeout': 123, - 'disable_rollback': False, - 'environment': {'foo': 'bar'} - } - } - new_profile = stack.StackProfile('u', new_spec) - - # do it - res = profile.do_update(test_stack, new_profile) - - # assertions - self.assertTrue(res) - kwargs = { - 'template': {'Template': 'data update'}, - 'parameters': {'new': 'params'}, - 'timeout_mins': 123, - 'disable_rollback': False, - 'files': {'file1': 'new_content'}, - 'environment': {'foo': 'bar'}, - } - oc.stack_update.assert_called_once_with('FAKE_ID', **kwargs) - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'UPDATE_COMPLETE', timeout=3600) - - def test_do_update_no_physical_stack(self): - profile = stack.StackProfile('t', self.spec) - test_stack = mock.Mock(physical_id=None) - new_profile = mock.Mock() - - res = profile.do_update(test_stack, new_profile) - - self.assertFalse(res) - - def test_do_update_only_template(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['template'] = {"Template": "data update"} - new_profile = stack.StackProfile('u', new_spec) - - res = profile.do_update(stack_obj, new_profile) - - self.assertTrue(res) - oc.stack_update.assert_called_once_with( - 'FAKE_ID', template={"Template": "data update"}) - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'UPDATE_COMPLETE', timeout=3600) - - def test_do_update_only_params(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['parameters'] = {"new": "params"} - new_profile = stack.StackProfile('u', new_spec) - - res = profile.do_update(stack_obj, new_profile) - - self.assertTrue(res) - oc.stack_update.assert_called_once_with( - 'FAKE_ID', parameters={"new": "params"}) - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'UPDATE_COMPLETE', timeout=3600) - - def test_do_update_with_timeout_value(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['timeout'] = 120 - new_profile = stack.StackProfile('u', new_spec) - - # do it - res = profile.do_update(stack_obj, new_profile) - - # assertions - self.assertTrue(res) - oc.stack_update.assert_called_once_with('FAKE_ID', timeout_mins=120) - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'UPDATE_COMPLETE', timeout=3600) - - def test_do_update_disable_rollback(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['disable_rollback'] = False - new_profile = stack.StackProfile('u', new_spec) - - # do it - res = profile.do_update(stack_obj, new_profile) - - # assertions - self.assertTrue(res) - 
oc.stack_update.assert_called_once_with('FAKE_ID', - disable_rollback=False) - oc.wait_for_stack.assert_called_once_with('FAKE_ID', 'UPDATE_COMPLETE', - timeout=3600) - - def test_do_update_files(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['files'] = {"new": "file1"} - new_profile = stack.StackProfile('u', new_spec) - - # do it - res = profile.do_update(stack_obj, new_profile) - - # assertions - self.assertTrue(res) - oc.stack_update.assert_called_once_with( - 'FAKE_ID', files={"new": "file1"}) - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'UPDATE_COMPLETE', timeout=3600) - - def test_do_update_environment(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['environment'] = {"new": "env1"} - new_profile = stack.StackProfile('u', new_spec) - - # do it - res = profile.do_update(stack_obj, new_profile) - - # assertions - self.assertTrue(res) - oc.stack_update.assert_called_once_with( - 'FAKE_ID', environment={"new": "env1"}) - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'UPDATE_COMPLETE', timeout=3600) - - def test_do_update_no_change(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_profile = stack.StackProfile('u', new_spec) - - res = profile.do_update(stack_obj, new_profile) - - self.assertTrue(res) - self.assertEqual(0, oc.stack_update.call_count) - - def test_do_update_failed_update(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - oc.stack_update = mock.Mock( - side_effect=exc.InternalError(code=400, message='Failed')) - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['environment'] = {"new": "env1"} - new_profile = stack.StackProfile('u', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile.do_update, - stack_obj, new_profile) - - oc.stack_update.assert_called_once_with( - 'FAKE_ID', environment={"new": "env1"}) - self.assertEqual(0, oc.wait_for_stack.call_count) - self.assertEqual("Failed in updating stack 'FAKE_ID': " - "Failed.", str(ex)) - - def test_do_update_timeout(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - oc.wait_for_stack = mock.Mock( - side_effect=exc.InternalError(code=400, message='Timeout')) - stack_obj = mock.Mock(physical_id='FAKE_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['environment'] = {"new": "env1"} - new_profile = stack.StackProfile('u', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile.do_update, - stack_obj, new_profile) - - oc.stack_update.assert_called_once_with( - 'FAKE_ID', environment={"new": "env1"}) - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'UPDATE_COMPLETE', timeout=3600) - self.assertEqual("Failed in updating stack 'FAKE_ID': " - "Timeout.", str(ex)) - - def test_do_check(self): - node_obj = mock.Mock(physical_id='FAKE_ID') - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - - # do it - res = profile.do_check(node_obj) - - # assertions - 
self.assertTrue(res) - oc.stack_check.assert_called_once_with('FAKE_ID') - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'CHECK_COMPLETE', timeout=3600) - - def test_do_check_no_physical_id(self): - node_obj = mock.Mock(physical_id=None) - profile = stack.StackProfile('t', self.spec) - - res = profile.do_check(node_obj) - - self.assertFalse(res) - - def test_do_check_failed_checking(self): - node_obj = mock.Mock(physical_id='FAKE_ID') - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - oc.stack_check = mock.Mock( - side_effect=exc.InternalError(code=400, message='BOOM')) - - self.assertRaises(exc.EResourceOperation, profile.do_check, node_obj) - - oc.stack_check.assert_called_once_with('FAKE_ID') - self.assertEqual(0, oc.wait_for_stack.call_count) - - def test_do_check_failed_in_waiting(self): - node_obj = mock.Mock(physical_id='FAKE_ID') - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - oc.wait_for_stack = mock.Mock( - side_effect=exc.InternalError(code=400, message='BOOM')) - - self.assertRaises(exc.EResourceOperation, profile.do_check, node_obj) - - oc.stack_check.assert_called_once_with('FAKE_ID') - oc.wait_for_stack.assert_called_once_with( - 'FAKE_ID', 'CHECK_COMPLETE', timeout=3600) - - def test_do_get_details(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - details = mock.Mock() - details.to_dict.return_value = {'foo': 'bar'} - oc.stack_get = mock.Mock(return_value=details) - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_get_details(node_obj) - - self.assertEqual({'foo': 'bar'}, res) - oc.stack_get.assert_called_once_with('FAKE_ID') - - def test_do_get_details_no_physical_id(self): - profile = stack.StackProfile('t', self.spec) - node_obj = mock.Mock(physical_id=None) - - res = profile.do_get_details(node_obj) - - self.assertEqual({}, res) - - def test_do_get_details_failed_retrieval(self): - profile = stack.StackProfile('t', self.spec) - node_obj = mock.Mock(physical_id='STACK_ID') - oc = mock.Mock() - oc.stack_get.side_effect = exc.InternalError(message='BOOM') - profile._orchestrationclient = oc - - res = profile.do_get_details(node_obj) - - self.assertEqual({'Error': {'code': 500, 'message': 'BOOM'}}, res) - oc.stack_get.assert_called_once_with('STACK_ID') - - def test_do_adopt(self): - profile = stack.StackProfile('t', self.spec) - x_stack = mock.Mock( - parameters={'p1': 'v1', 'OS::stack_id': 'FAKE_ID'}, - timeout_mins=123, - is_rollback_disabled=False - ) - oc = mock.Mock() - oc.stack_get = mock.Mock(return_value=x_stack) - - # mock template - templ = mock.Mock() - templ.to_dict.return_value = {'foo': 'bar'} - oc.stack_get_template = mock.Mock(return_value=templ) - - # mock environment - env = mock.Mock() - env.to_dict.return_value = {'ke': 've'} - oc.stack_get_environment = mock.Mock(return_value=env) - - oc.stack_get_files = mock.Mock(return_value={'fn': 'content'}) - profile._orchestrationclient = oc - - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_adopt(node_obj) - - expected = { - 'environment': {'ke': 've'}, - 'files': {'fn': 'content'}, - 'template': {'foo': 'bar'}, - 'parameters': {'p1': 'v1'}, - 'timeout': 123, - 'disable_rollback': False - } - self.assertEqual(expected, res) - oc.stack_get.assert_called_once_with('FAKE_ID') - oc.stack_get_template.assert_called_once_with('FAKE_ID') - oc.stack_get_environment.assert_called_once_with('FAKE_ID') - 
oc.stack_get_files.assert_called_once_with('FAKE_ID') - - def test_do_adopt_failed_get(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - oc.stack_get.side_effect = exc.InternalError(message='BOOM') - profile._orchestrationclient = oc - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_adopt(node_obj) - - expected = {'Error': {'code': 500, 'message': 'BOOM'}} - self.assertEqual(expected, res) - oc.stack_get.assert_called_once_with('FAKE_ID') - - def test_do_adopt_failed_get_template(self): - profile = stack.StackProfile('t', self.spec) - x_stack = mock.Mock() - oc = mock.Mock() - oc.stack_get = mock.Mock(return_value=x_stack) - oc.stack_get_template.side_effect = exc.InternalError(message='BOOM') - profile._orchestrationclient = oc - - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_adopt(node_obj) - - expected = {'Error': {'code': 500, 'message': 'BOOM'}} - self.assertEqual(expected, res) - oc.stack_get.assert_called_once_with('FAKE_ID') - oc.stack_get_template.assert_called_once_with('FAKE_ID') - - def test_do_adopt_failed_get_environment(self): - profile = stack.StackProfile('t', self.spec) - x_stack = mock.Mock() - oc = mock.Mock() - oc.stack_get = mock.Mock(return_value=x_stack) - oc.stack_get_template = mock.Mock(return_value={'foo': 'bar'}) - err = exc.InternalError(message='BOOM') - oc.stack_get_environment.side_effect = err - profile._orchestrationclient = oc - - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_adopt(node_obj) - - expected = {'Error': {'code': 500, 'message': 'BOOM'}} - self.assertEqual(expected, res) - oc.stack_get.assert_called_once_with('FAKE_ID') - oc.stack_get_template.assert_called_once_with('FAKE_ID') - oc.stack_get_environment.assert_called_once_with('FAKE_ID') - - def test_do_adopt_failed_get_files(self): - profile = stack.StackProfile('t', self.spec) - x_stack = mock.Mock() - oc = mock.Mock() - oc.stack_get = mock.Mock(return_value=x_stack) - oc.stack_get_template = mock.Mock(return_value={'foo': 'bar'}) - oc.stack_get_environment = mock.Mock(return_value={'ke': 've'}) - oc.stack_get_files.side_effect = exc.InternalError(message='BOOM') - profile._orchestrationclient = oc - - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_adopt(node_obj) - - expected = {'Error': {'code': 500, 'message': 'BOOM'}} - self.assertEqual(expected, res) - oc.stack_get.assert_called_once_with('FAKE_ID') - oc.stack_get_template.assert_called_once_with('FAKE_ID') - oc.stack_get_environment.assert_called_once_with('FAKE_ID') - oc.stack_get_files.assert_called_once_with('FAKE_ID') - - def test_do_adopt_with_overrides(self): - profile = stack.StackProfile('t', self.spec) - x_stack = mock.Mock( - parameters={'p1': 'v1', 'OS::stack_id': 'FAKE_ID'}, - timeout_mins=123, - is_rollback_disabled=False - ) - oc = mock.Mock() - oc.stack_get = mock.Mock(return_value=x_stack) - - # mock environment - env = mock.Mock() - env.to_dict.return_value = {'ke': 've'} - oc.stack_get_environment = mock.Mock(return_value=env) - - # mock template - templ = mock.Mock() - templ.to_dict.return_value = {'foo': 'bar'} - oc.stack_get_template = mock.Mock(return_value=templ) - - oc.stack_get_files = mock.Mock(return_value={'fn': 'content'}) - profile._orchestrationclient = oc - - node_obj = mock.Mock(physical_id='FAKE_ID') - overrides = {'environment': {'ENV': 'SETTING'}} - res = profile.do_adopt(node_obj, overrides=overrides) - - expected = { - 'environment': {'ENV': 'SETTING'}, - 'files': {'fn': 'content'}, - 'template': 
{'foo': 'bar'}, - 'parameters': {'p1': 'v1'}, - 'timeout': 123, - 'disable_rollback': False - } - self.assertEqual(expected, res) - oc.stack_get.assert_called_once_with('FAKE_ID') - oc.stack_get_template.assert_called_once_with('FAKE_ID') - - def test_refresh_tags_empty_no_add(self): - profile = stack.StackProfile('t', self.spec) - node = mock.Mock() - - res = profile._refresh_tags([], node, False) - - self.assertEqual(("", False), res) - - def test_refresh_tags_with_contents_no_add(self): - profile = stack.StackProfile('t', self.spec) - node = mock.Mock() - - res = profile._refresh_tags(['foo'], node, False) - - self.assertEqual(('foo', False), res) - - def test_refresh_tags_deleted_no_add(self): - profile = stack.StackProfile('t', self.spec) - node = mock.Mock() - - res = profile._refresh_tags(['cluster_id=FOO', 'bar'], node, False) - - self.assertEqual(('bar', True), res) - - def test_refresh_tags_empty_and_add(self): - profile = stack.StackProfile('t', self.spec) - node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - - res = profile._refresh_tags([], node, True) - - expected = ",".join(['cluster_id=CLUSTER_ID', - 'cluster_node_id=NODE_ID', - 'cluster_node_index=123']) - self.assertEqual((expected, True), res) - - def test_refresh_tags_with_contents_and_add(self): - profile = stack.StackProfile('t', self.spec) - node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - - res = profile._refresh_tags(['foo'], node, True) - - expected = ",".join(['foo', - 'cluster_id=CLUSTER_ID', - 'cluster_node_id=NODE_ID', - 'cluster_node_index=123']) - self.assertEqual((expected, True), res) - - def test_refresh_tags_deleted_and_add(self): - profile = stack.StackProfile('t', self.spec) - node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - - res = profile._refresh_tags(['cluster_id=FOO', 'bar'], node, True) - - expected = ",".join(['bar', - 'cluster_id=CLUSTER_ID', - 'cluster_node_id=NODE_ID', - 'cluster_node_index=123']) - self.assertEqual((expected, True), res) - - def test_do_join(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - x_stack = mock.Mock(tags='foo') - oc.stack_get.return_value = x_stack - node = mock.Mock(physical_id='STACK_ID') - mock_tags = self.patchobject(profile, '_refresh_tags', - return_value=('bar', True)) - - res = profile.do_join(node, 'CLUSTER_ID') - - self.assertTrue(res) - oc.stack_get.assert_called_once_with('STACK_ID') - mock_tags.assert_called_once_with('foo', node, True) - oc.stack_update.assert_called_once_with('STACK_ID', **{'tags': 'bar'}) - - def test_do_join_no_physical_id(self): - profile = stack.StackProfile('t', self.spec) - node = mock.Mock(physical_id=None) - - res = profile.do_join(node, 'CLUSTER_ID') - - self.assertFalse(res) - - def test_do_join_failed_get_stack(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - err = exc.InternalError(code=400, message='Boom') - oc.stack_get.side_effect = err - node = mock.Mock(physical_id='STACK_ID') - - res = profile.do_join(node, 'CLUSTER_ID') - - self.assertFalse(res) - oc.stack_get.assert_called_once_with('STACK_ID') - - def test_do_join_no_update(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - x_stack = mock.Mock(tags='foo') - oc.stack_get.return_value = x_stack - node = mock.Mock(physical_id='STACK_ID') - mock_tags = self.patchobject(profile, '_refresh_tags', - return_value=('foo', False)) - - 
res = profile.do_join(node, 'CLUSTER_ID') - - self.assertTrue(res) - oc.stack_get.assert_called_once_with('STACK_ID') - mock_tags.assert_called_once_with('foo', node, True) - self.assertEqual(0, oc.stack_update.call_count) - - def test_do_join_failed_update(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - x_stack = mock.Mock(tags='foo') - oc.stack_get.return_value = x_stack - err = exc.InternalError(code=400, message='Boom') - oc.stack_update.side_effect = err - node = mock.Mock(physical_id='STACK_ID') - mock_tags = self.patchobject(profile, '_refresh_tags', - return_value=('bar', True)) - - res = profile.do_join(node, 'CLUSTER_ID') - - self.assertFalse(res) - oc.stack_get.assert_called_once_with('STACK_ID') - mock_tags.assert_called_once_with('foo', node, True) - oc.stack_update.assert_called_once_with('STACK_ID', **{'tags': 'bar'}) - - def test_do_leave(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - x_stack = mock.Mock(tags='foo') - oc.stack_get.return_value = x_stack - node = mock.Mock(physical_id='STACK_ID') - mock_tags = self.patchobject(profile, '_refresh_tags', - return_value=('bar', True)) - - res = profile.do_leave(node) - - self.assertTrue(res) - oc.stack_get.assert_called_once_with('STACK_ID') - mock_tags.assert_called_once_with('foo', node, False) - oc.stack_update.assert_called_once_with('STACK_ID', **{'tags': 'bar'}) - - def test_do_leave_no_physical_id(self): - profile = stack.StackProfile('t', self.spec) - node = mock.Mock(physical_id=None) - - res = profile.do_leave(node) - - self.assertFalse(res) - - def test_do_leave_failed_get_stack(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - err = exc.InternalError(code=400, message='Boom') - oc.stack_get.side_effect = err - node = mock.Mock(physical_id='STACK_ID') - - res = profile.do_leave(node) - - self.assertFalse(res) - oc.stack_get.assert_called_once_with('STACK_ID') - - def test_do_leave_no_update(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - x_stack = mock.Mock(tags='foo') - oc.stack_get.return_value = x_stack - node = mock.Mock(physical_id='STACK_ID') - mock_tags = self.patchobject(profile, '_refresh_tags', - return_value=('foo', False)) - - res = profile.do_leave(node) - - self.assertTrue(res) - oc.stack_get.assert_called_once_with('STACK_ID') - mock_tags.assert_called_once_with('foo', node, False) - self.assertEqual(0, oc.stack_update.call_count) - - def test_do_leave_failed_update(self): - profile = stack.StackProfile('t', self.spec) - oc = mock.Mock() - profile._orchestrationclient = oc - x_stack = mock.Mock(tags='foo') - oc.stack_get.return_value = x_stack - err = exc.InternalError(code=400, message='Boom') - oc.stack_update.side_effect = err - node = mock.Mock(physical_id='STACK_ID') - mock_tags = self.patchobject(profile, '_refresh_tags', - return_value=('bar', True)) - - res = profile.do_leave(node) - - self.assertFalse(res) - oc.stack_get.assert_called_once_with('STACK_ID') - mock_tags.assert_called_once_with('foo', node, False) - oc.stack_update.assert_called_once_with('STACK_ID', **{'tags': 'bar'}) diff --git a/senlin/tests/unit/profiles/test_nova_server.py b/senlin/tests/unit/profiles/test_nova_server.py deleted file mode 100644 index d2b9ce269..000000000 --- a/senlin/tests/unit/profiles/test_nova_server.py +++ /dev/null @@ -1,2713 +0,0 @@ -# Licensed under the 
Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 -from unittest import mock - -from oslo_config import cfg -from oslo_utils import encodeutils - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.objects import node as node_ob -from senlin.profiles import base as profiles_base -from senlin.profiles.os.nova import server -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestNovaServerBasic(base.SenlinTestCase): - - def setUp(self): - super(TestNovaServerBasic, self).setUp() - - self.context = utils.dummy_context() - self.spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'context': {}, - 'admin_pass': 'adminpass', - 'auto_disk_config': True, - 'availability_zone': 'FAKE_AZ', - 'config_drive': False, - 'flavor': 'FLAV', - 'image': 'FAKE_IMAGE', - 'key_name': 'FAKE_KEYNAME', - "metadata": {"meta var": "meta val"}, - 'name': 'FAKE_SERVER_NAME', - 'networks': [{ - 'fixed_ip': 'FAKE_IP', - 'network': 'FAKE_NET', - 'floating_network': 'FAKE_PUBLIC_NET', - }], - 'personality': [{ - 'path': '/etc/motd', - 'contents': 'foo', - }], - 'scheduler_hints': { - 'same_host': 'HOST_ID', - }, - 'security_groups': ['HIGH_SECURITY_GROUP'], - 'user_data': 'FAKE_USER_DATA', - } - } - - def test_init(self): - profile = server.ServerProfile('t', self.spec) - - self.assertIsNone(profile.server_id) - - def test_build_metadata(self): - obj = mock.Mock(id='NODE_ID', cluster_id='') - profile = server.ServerProfile('t', self.spec) - - res = profile._build_metadata(obj, None) - - self.assertEqual({'cluster_node_id': 'NODE_ID'}, res) - - def test_build_metadata_with_inputs(self): - obj = mock.Mock(id='NODE_ID', cluster_id='') - profile = server.ServerProfile('t', self.spec) - - res = profile._build_metadata(obj, {'foo': 'bar'}) - - self.assertEqual({'cluster_node_id': 'NODE_ID', 'foo': 'bar'}, res) - - def test_build_metadata_for_cluster_node(self): - obj = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) - profile = server.ServerProfile('t', self.spec) - - res = profile._build_metadata(obj, None) - - self.assertEqual( - { - 'cluster_id': 'CLUSTER_ID', - 'cluster_node_id': 'NODE_ID', - 'cluster_node_index': '123' - }, - res - ) - - def _stubout_profile(self, profile, - mock_image=False, - mock_flavor=False, - mock_keypair=False, - mock_net=False, - mock_volume_type=False): - if mock_image: - image = mock.Mock(id='FAKE_IMAGE_ID') - self.patchobject(profile, '_validate_image', return_value=image) - - if mock_flavor: - flavor = mock.Mock(id='FAKE_FLAVOR_ID') - self.patchobject(profile, '_validate_flavor', return_value=flavor) - - if mock_keypair: - keypair = mock.Mock() - keypair.name = 'FAKE_KEYNAME' - self.patchobject(profile, '_validate_keypair', - return_value=keypair) - if mock_net: - fake_net = { - 'fixed_ip': 'FAKE_IP', - 'port': 'FAKE_PORT', - 'uuid': 'FAKE_NETWORK_ID', - 'floating_network': 'FAKE_PUBLIC_NET_ID', - } - self.patchobject(profile, '_validate_network', - return_value=fake_net) - fake_ports = [{ 
- 'id': 'FAKE_PORT' - }] - self.patchobject(profile, '_create_ports_from_properties', - return_value=fake_ports) - if mock_volume_type: - fake_volume_type = mock.Mock() - fake_volume_type.id = '588854a9' - fake_volume_type.name = 'FAKE_VOLUME_TYPE' - self.patchobject(profile, '_validate_volume_type', - return_value=fake_volume_type) - - def test_do_create(self): - cc = mock.Mock() - nc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=True) - mock_zone_info = self.patchobject(profile, '_update_zone_info') - node_obj = mock.Mock(id='FAKE_NODE_ID', index=123, - cluster_id='FAKE_CLUSTER_ID', - data={ - 'placement': { - 'zone': 'AZ1', - 'servergroup': 'SERVER_GROUP_1' - } - }) - node_obj.name = 'TEST_SERVER' - fake_server = mock.Mock(id='FAKE_ID') - cc.server_create.return_value = fake_server - cc.server_get.return_value = fake_server - - # do it - server_id = profile.do_create(node_obj) - - # assertion - attrs = dict( - adminPass='adminpass', - availability_zone='AZ1', - config_drive=False, - flavorRef='FAKE_FLAVOR_ID', - imageRef='FAKE_IMAGE_ID', - key_name='FAKE_KEYNAME', - metadata={ - 'cluster_id': 'FAKE_CLUSTER_ID', - 'cluster_node_id': 'FAKE_NODE_ID', - 'cluster_node_index': '123', - 'meta var': 'meta val' - }, - name='FAKE_SERVER_NAME', - networks=[{ - 'port': 'FAKE_PORT', - }], - personality=[{ - 'path': '/etc/motd', - 'contents': 'foo' - }], - scheduler_hints={ - 'same_host': 'HOST_ID', - 'group': 'SERVER_GROUP_1', - }, - security_groups=[{'name': 'HIGH_SECURITY_GROUP'}], - user_data='FAKE_USER_DATA', - ) - - ud = encodeutils.safe_encode('FAKE_USER_DATA') - attrs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud)) - attrs['OS-DCF:diskConfig'] = 'AUTO' - - cc.server_create.assert_called_once_with(**attrs) - cc.server_get.assert_called_once_with('FAKE_ID') - mock_zone_info.assert_called_once_with(node_obj, fake_server) - self.assertEqual('FAKE_ID', server_id) - - @mock.patch.object(node_ob.Node, 'update') - def test_do_create_fail_create_instance_when_node_longer_exists( - self, mock_node_update): - mock_node_update.side_effect = [ - None, exc.ResourceNotFound(type='Node', id='FAKE_NODE_ID') - ] - - cc = mock.Mock() - nc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = nc - profile._rollback_ports = mock.Mock() - profile._rollback_instance = mock.Mock() - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=False) - node_obj = mock.Mock(id='FAKE_NODE_ID', index=123, - availability_zone="AZ01", - cluster_id='FAKE_CLUSTER_ID', - data={ - 'placement': { - 'zone': 'AZ1', - 'servergroup': 'SERVER_GROUP_1' - } - }) - node_obj.name = 'TEST_SERVER' - fake_server = mock.Mock(id='FAKE_ID') - cc.server_create.return_value = fake_server - cc.server_get.return_value = fake_server - - self.assertRaises( - exc.ResourceNotFound, profile.do_create, node_obj - ) - - profile._rollback_ports.assert_called_once() - profile._rollback_instance.assert_called_once() - - @mock.patch.object(node_ob.Node, 'update') - def test_do_create_fail_create_port_when_node_longer_exists( - self, mock_node_update): - mock_node_update.side_effect = exc.ResourceNotFound( - type='Node', id='FAKE_NODE_ID' - ) - - cc = mock.Mock() - nc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - 
profile._networkclient = nc - profile._rollback_ports = mock.Mock() - profile._rollback_instance = mock.Mock() - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=False) - node_obj = mock.Mock(id='FAKE_NODE_ID', index=123, - cluster_id='FAKE_CLUSTER_ID', - data={ - 'placement': { - 'zone': 'AZ1', - 'servergroup': 'SERVER_GROUP_1' - } - }) - node_obj.name = 'TEST_SERVER' - - self.assertRaises( - exc.ResourceNotFound, profile.do_create, node_obj - ) - - profile._rollback_ports.assert_called_once() - profile._rollback_instance.assert_not_called() - - def test_do_create_invalid_image(self): - profile = server.ServerProfile('s2', self.spec) - err = exc.EResourceCreation(type='server', message='boom') - mock_image = self.patchobject(profile, '_validate_image', - side_effect=err) - node_obj = mock.Mock() - - self.assertRaises(exc.EResourceCreation, profile.do_create, node_obj) - - mock_image.assert_called_once_with(node_obj, 'FAKE_IMAGE', 'create') - - def test_do_create_bdm_invalid_image(self): - cc = mock.Mock() - nc = mock.Mock() - node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123, - cluster_id='FAKE_CLUSTER_ID') - bdm_v2 = [ - { - 'volume_size': 1, - 'uuid': '6ce0be68', - 'source_type': 'image', - 'destination_type': 'volume', - 'boot_index': 0, - }, - ] - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'name': 'FAKE_SERVER_NAME', - 'security_groups': ['HIGH_SECURITY_GROUP'], - 'block_device_mapping_v2': bdm_v2, - } - } - profile = server.ServerProfile('s2', spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True) - err = exc.EResourceCreation(type='server', message='FOO') - mock_volume = self.patchobject(profile, '_resolve_bdm', - side_effect=err) - - self.assertRaises(exc.EResourceCreation, - profile.do_create, - node_obj) - expected_volume = [{ - 'guest_format': None, - 'boot_index': 0, - 'uuid': '6ce0be68', - 'volume_size': 1, - 'device_name': None, - 'disk_bus': None, - 'source_type': 'image', - 'device_type': None, - 'destination_type': 'volume', - 'delete_on_termination': None, - 'volume_type': None - }] - mock_volume.assert_called_once_with( - node_obj, expected_volume, 'create') - - def test_do_create_bdm_invalid_volume(self): - cc = mock.Mock() - nc = mock.Mock() - node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123, - cluster_id='FAKE_CLUSTER_ID') - bdm_v2 = [ - { - 'volume_size': 1, - 'uuid': '6ce0be68', - 'source_type': 'volume', - 'destination_type': 'volume', - 'boot_index': 0, - }, - ] - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'name': 'FAKE_SERVER_NAME', - 'security_groups': ['HIGH_SECURITY_GROUP'], - 'block_device_mapping_v2': bdm_v2, - } - } - profile = server.ServerProfile('s2', spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True) - err = exc.EResourceCreation(type='server', message='FOO') - mock_volume = self.patchobject(profile, '_resolve_bdm', - side_effect=err) - - self.assertRaises(exc.EResourceCreation, - profile.do_create, - node_obj) - expected_volume = [{ - 'guest_format': None, - 'boot_index': 0, - 'uuid': '6ce0be68', - 'volume_size': 1, - 'device_name': None, - 'disk_bus': None, - 'source_type': 'volume', - 'device_type': None, - 'destination_type': 'volume', - 'delete_on_termination': None, - 
'volume_type': None - }] - mock_volume.assert_called_once_with( - node_obj, expected_volume, 'create') - - def test_do_create_bdm_invalid_volume_type(self): - cc = mock.Mock() - nc = mock.Mock() - node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123, - cluster_id='FAKE_CLUSTER_ID') - bdm_v2 = [ - { - 'volume_size': 1, - 'source_type': 'blank', - 'destination_type': 'volume', - 'boot_index': 0, - 'volume_type': '588854a9', - }, - ] - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'name': 'FAKE_SERVER_NAME', - 'security_groups': ['HIGH_SECURITY_GROUP'], - 'block_device_mapping_v2': bdm_v2, - } - } - profile = server.ServerProfile('s2', spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True) - err = exc.EResourceCreation(type='server', message='FOO') - mock_volume = self.patchobject(profile, '_resolve_bdm', - side_effect=err) - - self.assertRaises(exc.EResourceCreation, - profile.do_create, - node_obj) - expected_volume = [{ - 'guest_format': None, - 'boot_index': 0, - 'uuid': None, - 'volume_size': 1, - 'device_name': None, - 'disk_bus': None, - 'source_type': 'blank', - 'device_type': None, - 'destination_type': 'volume', - 'delete_on_termination': None, - 'volume_type': '588854a9' - }] - mock_volume.assert_called_once_with( - node_obj, expected_volume, 'create') - - def test_do_create_invalid_flavor(self): - profile = server.ServerProfile('s2', self.spec) - self._stubout_profile(profile, mock_image=True) - err = exc.EResourceCreation(type='server', message='boom') - mock_flavor = self.patchobject(profile, '_validate_flavor', - side_effect=err) - node_obj = mock.Mock() - - self.assertRaises(exc.EResourceCreation, profile.do_create, node_obj) - - mock_flavor.assert_called_once_with(node_obj, 'FLAV', 'create') - - def test_do_create_invalid_keypair(self): - profile = server.ServerProfile('s2', self.spec) - self._stubout_profile(profile, mock_image=True, mock_flavor=True) - err = exc.EResourceCreation(type='server', message='boom') - mock_kp = self.patchobject(profile, '_validate_keypair', - side_effect=err) - node_obj = mock.Mock() - - self.assertRaises(exc.EResourceCreation, profile.do_create, node_obj) - - mock_kp.assert_called_once_with(node_obj, 'FAKE_KEYNAME', 'create') - - def test_do_create_invalid_network(self): - cc = mock.Mock() - nc = mock.Mock() - node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123, - cluster_id='FAKE_CLUSTER_ID') - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'image': 'FAKE_IMAGE', - 'key_name': 'FAKE_KEYNAME', - 'name': 'FAKE_SERVER_NAME', - 'networks': [{ - 'network': 'FAKE_NET' - }] - } - } - - profile = server.ServerProfile('s2', spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True) - err = exc.EResourceCreation(type='server', message='FOO') - mock_net = self.patchobject(profile, '_validate_network', - side_effect=err) - - self.assertRaises(exc.EResourceCreation, - profile.do_create, - node_obj) - expect_params = { - 'floating_network': None, - 'network': 'FAKE_NET', - 'vnic_type': None, - 'fixed_ip': None, - 'floating_ip': None, - 'port': None, - 'security_groups': None, - 'subnet': None - } - mock_net.assert_called_once_with( - node_obj, expect_params, 'create') - - def test_do_create_server_attrs_not_defined(self): - cc = mock.Mock() - nc = 
mock.Mock() - node_obj = mock.Mock(id='FAKE_NODE_ID', data={}, index=123, - cluster_id='FAKE_CLUSTER_ID') - - # Assume image/scheduler_hints/user_data were not defined in spec file - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'name': 'FAKE_SERVER_NAME', - 'security_groups': ['HIGH_SECURITY_GROUP'], - } - } - profile = server.ServerProfile('t', spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=True) - mock_zone_info = self.patchobject(profile, '_update_zone_info') - fake_server = mock.Mock(id='FAKE_ID') - cc.server_create.return_value = fake_server - cc.server_get.return_value = fake_server - - # do it - server_id = profile.do_create(node_obj) - - # assertions - attrs = { - 'OS-DCF:diskConfig': 'AUTO', - 'flavorRef': 'FAKE_FLAVOR_ID', - 'name': 'FAKE_SERVER_NAME', - 'metadata': { - 'cluster_id': 'FAKE_CLUSTER_ID', - 'cluster_node_id': 'FAKE_NODE_ID', - 'cluster_node_index': '123', - }, - 'security_groups': [{'name': 'HIGH_SECURITY_GROUP'}] - } - - cc.server_create.assert_called_once_with(**attrs) - cc.server_get.assert_called_once_with('FAKE_ID') - mock_zone_info.assert_called_once_with(node_obj, fake_server) - self.assertEqual('FAKE_ID', server_id) - - def test_do_create_obj_name_cluster_id_is_none(self): - cc = mock.Mock() - nc = mock.Mock() - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'name': 'FAKE_SERVER_NAME', - 'security_groups': ['HIGH_SECURITY_GROUP'], - } - } - profile = server.ServerProfile('t', spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=True) - mock_zone_info = self.patchobject(profile, '_update_zone_info') - node_obj = mock.Mock(id='FAKE_NODE_ID', cluster_id='', data={}, - index=None) - node_obj.name = None - fake_server = mock.Mock(id='FAKE_ID') - cc.server_create.return_value = fake_server - cc.server_get.return_value = fake_server - - server_id = profile.do_create(node_obj) - - attrs = { - 'OS-DCF:diskConfig': 'AUTO', - 'flavorRef': 'FAKE_FLAVOR_ID', - 'name': 'FAKE_SERVER_NAME', - 'metadata': {'cluster_node_id': 'FAKE_NODE_ID'}, - 'security_groups': [{'name': 'HIGH_SECURITY_GROUP'}] - } - - cc.server_create.assert_called_once_with(**attrs) - cc.server_get.assert_called_once_with('FAKE_ID') - mock_zone_info.assert_called_once_with(node_obj, fake_server) - self.assertEqual('FAKE_ID', server_id) - - def test_do_create_name_property_is_not_defined(self): - cc = mock.Mock() - nc = mock.Mock() - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'security_groups': ['HIGH_SECURITY_GROUP'], - } - } - profile = server.ServerProfile('t', spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=True) - mock_zone_info = self.patchobject(profile, '_update_zone_info') - - node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={}) - node_obj.name = 'TEST-SERVER' - fake_server = mock.Mock(id='FAKE_ID') - cc.server_create.return_value = fake_server - cc.server_get.return_value = fake_server - - # do it - server_id = profile.do_create(node_obj) - - # assertions - attrs = { - 'OS-DCF:diskConfig': 'AUTO', - 'flavorRef': 'FAKE_FLAVOR_ID', - 'name': 'TEST-SERVER', - 'metadata': {'cluster_node_id': 
'NODE_ID'}, - 'security_groups': [{'name': 'HIGH_SECURITY_GROUP'}] - } - - cc.server_create.assert_called_once_with(**attrs) - cc.server_get.assert_called_once_with('FAKE_ID') - mock_zone_info.assert_called_once_with(node_obj, fake_server) - self.assertEqual('FAKE_ID', server_id) - - def test_do_create_bdm_v2(self): - cc = mock.Mock() - nc = mock.Mock() - bdm_v2 = [ - { - 'volume_size': 1, - 'uuid': '6ce0be68', - 'source_type': 'image', - 'destination_type': 'volume', - 'boot_index': 0, - }, - { - 'volume_size': 2, - 'source_type': 'blank', - 'destination_type': 'volume', - } - ] - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'name': 'FAKE_SERVER_NAME', - 'security_groups': ['HIGH_SECURITY_GROUP'], - 'block_device_mapping_v2': bdm_v2, - } - } - profile = server.ServerProfile('t', spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=True, - mock_volume_type=True) - mock_zone_info = self.patchobject(profile, '_update_zone_info') - node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={}) - node_obj.name = None - fake_server = mock.Mock(id='FAKE_ID') - cc.server_create.return_value = fake_server - cc.server_get.return_value = fake_server - - # do it - server_id = profile.do_create(node_obj) - - # assertions - expected_volume = { - 'guest_format': None, - 'boot_index': 0, - 'uuid': '6ce0be68', - 'volume_size': 1, - 'device_name': None, - 'disk_bus': None, - 'source_type': 'image', - 'device_type': None, - 'destination_type': 'volume', - 'delete_on_termination': None, - 'volume_type': None - } - self.assertEqual(expected_volume, - profile.properties['block_device_mapping_v2'][0]) - attrs = { - 'OS-DCF:diskConfig': 'AUTO', - 'flavorRef': 'FAKE_FLAVOR_ID', - 'name': 'FAKE_SERVER_NAME', - 'metadata': {'cluster_node_id': 'NODE_ID'}, - 'security_groups': [{'name': 'HIGH_SECURITY_GROUP'}], - 'block_device_mapping_v2': bdm_v2 - } - cc.server_create.assert_called_once_with(**attrs) - cc.server_get.assert_called_once_with('FAKE_ID') - profile._validate_image.assert_called_once_with( - node_obj, expected_volume['uuid'], 'create') - - mock_zone_info.assert_called_once_with(node_obj, fake_server) - self.assertEqual('FAKE_ID', server_id) - - @mock.patch.object(node_ob.Node, 'update') - def test_do_create_wait_server_timeout(self, mock_node_obj): - cc = mock.Mock() - nc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = nc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=True) - - node_obj = mock.Mock(id='FAKE_NODE_ID', index=123, - cluster_id='FAKE_CLUSTER_ID', - data={ - 'placement': { - 'zone': 'AZ1', - 'servergroup': 'SERVER_GROUP_1' - } - }) - node_obj.name = 'TEST_SERVER' - server_obj = mock.Mock(id='FAKE_ID') - cc.server_create.return_value = server_obj - - err = exc.InternalError(code=500, message='TIMEOUT') - cc.wait_for_server.side_effect = err - ex = self.assertRaises(exc.EResourceCreation, profile.do_create, - node_obj) - self.assertEqual('FAKE_ID', ex.resource_id) - self.assertEqual('Failed in creating server: TIMEOUT.', - str(ex)) - mock_node_obj.assert_not_called() - cc.wait_for_server.assert_called_once_with( - 'FAKE_ID', timeout=cfg.CONF.default_nova_timeout) - - @mock.patch.object(node_ob.Node, 'update') - def test_do_create_failed(self, mock_node_obj): - cc = mock.Mock() - profile = 
server.ServerProfile('t', self.spec) - profile._computeclient = cc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=True) - mock_zone_info = self.patchobject(profile, '_update_zone_info') - node_obj = mock.Mock(id='FAKE_NODE_ID', index=123, - cluster_id='FAKE_CLUSTER_ID', - data={ - 'placement': { - 'zone': 'AZ1', - 'servergroup': 'SERVER_GROUP_1' - } - }) - node_obj.name = 'TEST_SERVER' - cc.server_create.side_effect = exc.InternalError( - code=500, message="creation failed.") - - # do it - ex = self.assertRaises(exc.EResourceCreation, profile.do_create, - node_obj) - - # assertions - mock_node_obj.assert_called_once_with(mock.ANY, node_obj.id, - {'data': node_obj.data}) - self.assertEqual('Failed in creating server: creation failed.', - str(ex)) - self.assertIsNone(ex.resource_id) - self.assertEqual(0, cc.wait_for_server.call_count) - self.assertEqual(0, mock_zone_info.call_count) - - @mock.patch.object(node_ob.Node, 'update') - @mock.patch.object(server.ServerProfile, 'do_delete') - def test_do_create_failed_with_server_id(self, mock_profile_delete, - mock_node_obj): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - self._stubout_profile(profile, mock_image=True, mock_flavor=True, - mock_keypair=True, mock_net=True) - mock_zone_info = self.patchobject(profile, '_update_zone_info') - node_obj = mock.Mock(id='FAKE_NODE_ID', index=123, - cluster_id='FAKE_CLUSTER_ID', - data={ - 'placement': { - 'zone': 'AZ1', - 'servergroup': 'SERVER_GROUP_1' - } - }) - node_obj.name = 'TEST_SERVER' - fake_server = mock.Mock(id='FAKE_ID') - cc.server_create.return_value = fake_server - cc.wait_for_server.side_effect = exc.InternalError( - code=500, message="creation failed.") - - # do it - ex = self.assertRaises(exc.EResourceCreation, profile.do_create, - node_obj) - - # assertions - mock_node_obj.assert_not_called() - mock_profile_delete.assert_called_once_with( - node_obj, internal_ports=[{'id': 'FAKE_PORT'}]) - self.assertEqual('Failed in creating server: creation failed.', - str(ex)) - self.assertEqual(1, cc.wait_for_server.call_count) - self.assertEqual(0, mock_zone_info.call_count) - - def test_rollback_ports(self): - nc = mock.Mock() - nc.port_delete.return_value = None - nc.floatingip_delete.return_value = None - profile = server.ServerProfile('t', self.spec) - profile._networkclient = nc - - ports = [ - { - 'id': 'FAKE_PORT_ID', - 'remove': True - }, - { - 'floating': { - 'remove': True, - 'id': 'FAKE_FLOATING_ID', - }, - 'id': 'FAKE_PORT_ID', - 'remove': True - }, - { - 'floating': { - 'remove': False, - 'id': 'FAKE_FLOATING_ID', - }, - 'id': 'FAKE_PORT_ID', - 'remove': False - } - ] - - node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={}) - - profile._rollback_ports(node_obj, ports) - - nc.port_delete.assert_called() - nc.floatingip_delete.assert_called_once_with('FAKE_FLOATING_ID') - - def test_rollback_with_no_ports(self): - nc = mock.Mock() - nc.port_delete.return_value = None - nc.floatingip_delete.return_value = None - profile = server.ServerProfile('t', self.spec) - profile._networkclient = nc - - ports = [] - - node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={}) - - profile._rollback_ports(node_obj, ports) - - nc.port_delete.assert_not_called() - nc.floatingip_delete.assert_not_called() - - def test_rollback_ports_with_internal_error(self): - nc = mock.Mock() - nc.port_delete.return_value = None - nc.floatingip_delete.side_effect = exc.InternalError() - 
profile = server.ServerProfile('t', self.spec) - profile._networkclient = nc - - ports = [{ - 'floating': { - 'remove': True, - 'id': 'FAKE_FLOATING_ID', - }, - 'id': 'FAKE_PORT_ID', - 'remove': True - }] - - node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={}) - - profile._rollback_ports(node_obj, ports) - - nc.port_delete.assert_not_called() - nc.floatingip_delete.assert_called_once_with('FAKE_FLOATING_ID') - - def test_rollback_instance(self): - cc = mock.Mock() - cc.port_delete.return_value = None - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - - server_obj = mock.Mock(id='SERVER_ID') - - node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={}) - - profile._rollback_instance(node_obj, server_obj) - - cc.server_force_delete.assert_called_once_with('SERVER_ID', True) - - def test_rollback_with_no_instance(self): - cc = mock.Mock() - cc.port_delete.return_value = None - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - - server_obj = None - - node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={}) - - profile._rollback_instance(node_obj, server_obj) - - cc.server_force_delete.assert_not_called() - - def test_rollback_instance_with_internal_error(self): - cc = mock.Mock() - cc.server_force_delete.side_effect = exc.InternalError() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - - server_obj = mock.Mock(id='SERVER_ID') - - node_obj = mock.Mock(id='NODE_ID', cluster_id='', index=-1, data={}) - - profile._rollback_instance(node_obj, server_obj) - - cc.server_force_delete.assert_called_once_with('SERVER_ID', True) - - def test_do_delete_ok(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_delete.return_value = None - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - test_server.data = {} - - res = profile.do_delete(test_server) - - self.assertTrue(res) - cc.server_delete.assert_called_once_with('FAKE_ID', True) - cc.wait_for_server_delete.assert_called_once_with( - 'FAKE_ID', timeout=cfg.CONF.default_nova_timeout) - - def test_do_delete_no_physical_id(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - profile._computeclient = cc - - test_server = mock.Mock(physical_id=None) - test_server.data = {} - - # do it - res = profile.do_delete(test_server) - - # assertions - self.assertTrue(res) - self.assertFalse(cc.server_delete.called) - self.assertFalse(cc.wait_for_server_delete.called) - - @mock.patch.object(node_ob.Node, 'update') - def test_do_delete_no_physical_id_with_internal_ports(self, mock_node_obj): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - nc = mock.Mock() - nc.port_delete.return_value = None - nc.floatingip_delete.return_value = None - profile._computeclient = cc - profile._networkclient = nc - - test_server = mock.Mock(physical_id=None) - test_server.data = {'internal_ports': [{ - 'floating': { - 'remove': True, - 'id': 'FAKE_FLOATING_ID', - }, - 'id': 'FAKE_PORT_ID', - 'remove': True - }]} - - # do it - res = profile.do_delete(test_server) - - # assertions - self.assertTrue(res) - mock_node_obj.assert_called_once_with( - mock.ANY, test_server.id, {'data': {'internal_ports': []}}) - self.assertFalse(cc.server_delete.called) - self.assertFalse(cc.wait_for_server_delete.called) - - @mock.patch.object(node_ob.Node, 'update') - def test_do_delete_ports_ok(self, mock_node_obj): - profile = server.ServerProfile('t', self.spec) - - cc 
= mock.Mock() - cc.server_delete.return_value = None - nc = mock.Mock() - nc.port_delete.return_value = None - nc.floatingip_delete.return_value = None - profile._computeclient = cc - profile._networkclient = nc - - test_server = mock.Mock(physical_id='FAKE_ID') - test_server.Node = mock.Mock() - test_server.data = {'internal_ports': [{ - 'floating': { - 'remove': True, - 'id': 'FAKE_FLOATING_ID', - }, - 'id': 'FAKE_PORT_ID', - 'remove': True - }]} - - res = profile.do_delete(test_server) - - self.assertTrue(res) - mock_node_obj.assert_called_once_with( - mock.ANY, test_server.id, {'data': {'internal_ports': []}}) - nc.floatingip_delete.assert_called_once_with('FAKE_FLOATING_ID') - nc.port_delete.assert_called_once_with('FAKE_PORT_ID') - cc.server_delete.assert_called_once_with('FAKE_ID', True) - cc.wait_for_server_delete.assert_called_once_with( - 'FAKE_ID', timeout=cfg.CONF.default_nova_timeout) - - def test_do_delete_ignore_missing_force(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - test_server.data = {} - - res = profile.do_delete(test_server, ignore_missing=False, force=True) - - self.assertTrue(res) - cc.server_force_delete.assert_called_once_with('FAKE_ID', False) - cc.wait_for_server_delete.assert_called_once_with( - 'FAKE_ID', timeout=cfg.CONF.default_nova_timeout) - - @mock.patch.object(node_ob.Node, 'update') - def test_do_delete_with_delete_failure(self, mock_node_obj): - cc = mock.Mock() - nc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = nc - - err = exc.InternalError(code=500, message='Nova Error') - cc.server_delete.side_effect = err - obj = mock.Mock(physical_id='FAKE_ID') - obj.data = {'internal_ports': [{ - 'floating': { - 'remove': True, - 'id': 'FAKE_FLOATING_ID', - }, - 'id': 'FAKE_PORT_ID', - 'remove': True - }]} - - # do it - ex = self.assertRaises(exc.EResourceDeletion, - profile.do_delete, obj) - - mock_node_obj.assert_called_once_with(mock.ANY, obj.id, - {'data': obj.data}) - self.assertEqual("Failed in deleting server 'FAKE_ID': " - "Nova Error.", str(ex)) - cc.server_delete.assert_called_once_with('FAKE_ID', True) - self.assertEqual(0, cc.wait_for_server_delete.call_count) - nc.port_delete.assert_called_once_with('FAKE_PORT_ID') - - @mock.patch.object(node_ob.Node, 'update') - def test_do_delete_with_force_delete_failure(self, mock_node_obj): - cc = mock.Mock() - nc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = nc - - err = exc.InternalError(code=500, message='Nova Error') - cc.server_force_delete.side_effect = err - obj = mock.Mock(physical_id='FAKE_ID') - obj.data = {} - - # do it - ex = self.assertRaises(exc.EResourceDeletion, - profile.do_delete, obj, force=True) - - mock_node_obj.assert_not_called() - self.assertEqual("Failed in deleting server 'FAKE_ID': " - "Nova Error.", str(ex)) - cc.server_force_delete.assert_called_once_with('FAKE_ID', True) - self.assertEqual(0, cc.wait_for_server_delete.call_count) - nc.port_delete.assert_not_called() - - @mock.patch.object(node_ob.Node, 'update') - def test_do_delete_wait_for_server_timeout(self, mock_node_obj): - cc = mock.Mock() - nc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = nc - - err = exc.InternalError(code=500, message='TIMEOUT') - cc.wait_for_server_delete.side_effect = err 
- obj = mock.Mock(physical_id='FAKE_ID') - obj.data = {'internal_ports': [{ - 'floating': { - 'remove': True, - 'id': 'FAKE_FLOATING_ID', - }, - 'id': 'FAKE_PORT_ID', - 'remove': True - }]} - - # do it - ex = self.assertRaises(exc.EResourceDeletion, - profile.do_delete, obj, timeout=20) - - mock_node_obj.assert_called_once_with(mock.ANY, obj.id, - {'data': obj.data}) - self.assertEqual("Failed in deleting server 'FAKE_ID': TIMEOUT.", - str(ex)) - cc.server_delete.assert_called_once_with('FAKE_ID', True) - cc.wait_for_server_delete.assert_called_once_with('FAKE_ID', - timeout=20) - nc.port_delete.assert_called_once_with('FAKE_PORT_ID') - - @mock.patch.object(node_ob.Node, 'update') - def test_do_delete_wait_for_server_timeout_delete_ports( - self, mock_node_obj): - cc = mock.Mock() - nc = mock.Mock() - nc.port_delete.return_value = None - nc.floatingip_delete.return_value = None - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = nc - - test_server = mock.Mock(physical_id='FAKE_ID') - test_server.Node = mock.Mock() - test_server.data = {'internal_ports': [{ - 'floating': { - 'remove': True, - 'id': 'FAKE_FLOATING_ID', - }, - 'id': 'FAKE_PORT_ID', - 'remove': True - }]} - - err = exc.InternalError(code=500, message='TIMEOUT') - cc.wait_for_server_delete.side_effect = err - - # do it - ex = self.assertRaises(exc.EResourceDeletion, - profile.do_delete, test_server, timeout=20) - - self.assertEqual("Failed in deleting server 'FAKE_ID': TIMEOUT.", - str(ex)) - mock_node_obj.assert_called_once_with( - mock.ANY, test_server.id, {'data': {'internal_ports': []}}) - cc.server_delete.assert_called_once_with('FAKE_ID', True) - cc.wait_for_server_delete.assert_called_once_with('FAKE_ID', - timeout=20) - nc.port_delete.assert_called_once_with('FAKE_PORT_ID') - - @mock.patch.object(node_ob.Node, 'update') - def test_do_delete_wait_for_server_timeout_no_internal_ports( - self, mock_node_obj): - cc = mock.Mock() - nc = mock.Mock() - nc.port_delete.return_value = None - nc.floatingip_delete.return_value = None - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = nc - - test_server = mock.Mock(physical_id='FAKE_ID') - test_server.Node = mock.Mock() - test_server.data = {} - - err = exc.InternalError(code=500, message='TIMEOUT') - cc.wait_for_server_delete.side_effect = err - - # do it - ex = self.assertRaises(exc.EResourceDeletion, - profile.do_delete, test_server, timeout=20) - - self.assertEqual("Failed in deleting server 'FAKE_ID': TIMEOUT.", - str(ex)) - mock_node_obj.assert_not_called() - cc.server_delete.assert_called_once_with('FAKE_ID', True) - cc.wait_for_server_delete.assert_called_once_with('FAKE_ID', - timeout=20) - nc.port_delete.assert_not_called() - - def test_do_get_details(self): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - # Test normal path - nova_server = mock.Mock() - nova_server.to_dict.return_value = { - 'OS-DCF:diskConfig': 'MANUAL', - 'OS-EXT-AZ:availability_zone': 'nova', - 'OS-EXT-STS:power_state': 1, - 'OS-EXT-STS:task_state': None, - 'OS-EXT-STS:vm_state': 'active', - 'OS-SRV-USG:launched_at': 'TIMESTAMP1', - 'OS-SRV-USG:terminated_at': None, - 'accessIPv4': 'FAKE_IPV4', - 'accessIPv6': 'FAKE_IPV6', - 'addresses': { - 'private': [{ - 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:5e:00:81', - 'version': 4, - 'addr': '10.0.0.3', - 'OS-EXT-IPS:type': 'fixed' - }] - }, - 'config_drive': 
True, - 'created': 'CREATED_TIMESTAMP', - 'flavor': { - 'id': '1', - 'name': 'FAKE_FLAVOR', - 'links': [{ - 'href': 'http://url_flavor', - 'rel': 'bookmark' - }] - }, - 'hostId': 'FAKE_HOST_ID', - 'id': 'FAKE_ID', - 'image': { - 'id': 'FAKE_IMAGE', - 'links': [{ - 'href': 'http://url_image', - 'rel': 'bookmark' - }], - }, - 'attached_volumes': [{ - 'id': 'FAKE_VOLUME', - }], - 'key_name': 'FAKE_KEY', - 'links': [{ - 'href': 'http://url1', - 'rel': 'self' - }, { - 'href': 'http://url2', - 'rel': 'bookmark' - }], - 'metadata': {}, - 'name': 'FAKE_NAME', - 'progress': 0, - 'security_groups': [{'name': 'default'}], - 'status': 'FAKE_STATUS', - 'tenant_id': 'FAKE_TENANT', - 'updated': 'UPDATE_TIMESTAMP', - 'user_id': 'FAKE_USER_ID', - } - cc.server_get.return_value = nova_server - cc.flavor_find.return_value = mock.Mock(id='1') - res = profile.do_get_details(node_obj) - expected = { - 'OS-DCF:diskConfig': 'MANUAL', - 'OS-EXT-AZ:availability_zone': 'nova', - 'OS-EXT-STS:power_state': 1, - 'OS-EXT-STS:task_state': '-', - 'OS-EXT-STS:vm_state': 'active', - 'OS-SRV-USG:launched_at': 'TIMESTAMP1', - 'OS-SRV-USG:terminated_at': '-', - 'accessIPv4': 'FAKE_IPV4', - 'accessIPv6': 'FAKE_IPV6', - 'config_drive': True, - 'created': 'CREATED_TIMESTAMP', - 'flavor': '1', - 'hostId': 'FAKE_HOST_ID', - 'id': 'FAKE_ID', - 'image': 'FAKE_IMAGE', - 'attached_volumes': ['FAKE_VOLUME'], - 'key_name': 'FAKE_KEY', - 'metadata': {}, - 'name': 'FAKE_NAME', - 'addresses': { - 'private': [{ - 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:5e:00:81', - 'version': 4, - 'addr': '10.0.0.3', - 'OS-EXT-IPS:type': 'fixed' - }] - }, - 'progress': 0, - 'security_groups': 'default', - 'updated': 'UPDATE_TIMESTAMP', - 'status': 'FAKE_STATUS', - } - self.assertEqual(expected, res) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_get_details_with_no_network_or_sg(self): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - # Test normal path - nova_server = mock.Mock() - nova_server.to_dict.return_value = { - 'addresses': {}, - 'flavor': { - 'id': 'FAKE_FLAVOR', - }, - 'id': 'FAKE_ID', - 'image': { - 'id': 'FAKE_IMAGE', - }, - 'attached_volumes': [{ - 'id': 'FAKE_VOLUME', - }], - 'security_groups': [], - } - cc.server_get.return_value = nova_server - cc.flavor_find.return_value = mock.Mock(id='FAKE_FLAVOR') - res = profile.do_get_details(node_obj) - expected = { - 'flavor': 'FAKE_FLAVOR', - 'id': 'FAKE_ID', - 'image': 'FAKE_IMAGE', - 'attached_volumes': ['FAKE_VOLUME'], - 'addresses': {}, - 'security_groups': '', - } - self.assertEqual(expected, res) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_get_details_flavor_no_id_key(self): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - # Test normal path - nova_server = mock.Mock() - nova_server.to_dict.return_value = { - 'addresses': { - 'private': [{ - 'version': 4, - 'addr': '10.0.0.3', - }] - }, - 'flavor': { - 'original_name': 'FAKE_FLAVOR', - }, - 'id': 'FAKE_ID', - 'image': {}, - 'attached_volumes': [{ - 'id': 'FAKE_VOLUME', - }], - 'security_groups': [{'name': 'default'}], - } - cc.server_get.return_value = nova_server - cc.flavor_find.return_value = mock.PropertyMock(id='FAKE_FLAVOR_ID') - - res = profile.do_get_details(node_obj) - expected = { - 'flavor': 'FAKE_FLAVOR_ID', - 'id': 'FAKE_ID', - 'image': {}, - 'attached_volumes': ['FAKE_VOLUME'], - 'addresses': { - 
'private': [{ - 'version': 4, - 'addr': '10.0.0.3', - }] - }, - 'security_groups': 'default', - } - self.assertEqual(expected, res) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_get_details_image_no_id_key(self): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - # Test normal path - nova_server = mock.Mock() - nova_server.to_dict.return_value = { - 'addresses': { - 'private': [{ - 'version': 4, - 'addr': '10.0.0.3', - }] - }, - 'flavor': { - 'id': '1', - 'name': 'FAKE_FLAVOR' - }, - 'id': 'FAKE_ID', - 'image': {}, - 'attached_volumes': [{ - 'id': 'FAKE_VOLUME', - }], - 'security_groups': [{'name': 'default'}], - } - cc.server_get.return_value = nova_server - cc.flavor_find.return_value = mock.Mock(id='1') - res = profile.do_get_details(node_obj) - expected = { - 'flavor': '1', - 'id': 'FAKE_ID', - 'image': {}, - 'attached_volumes': ['FAKE_VOLUME'], - 'addresses': { - 'private': [{ - 'version': 4, - 'addr': '10.0.0.3', - }] - }, - 'security_groups': 'default', - } - self.assertEqual(expected, res) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_get_details_bdm_no_id_key(self): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - # Test normal path - nova_server = mock.Mock() - nova_server.to_dict.return_value = { - 'addresses': { - 'private': [{ - 'version': 4, - 'addr': '10.0.0.3', - }] - }, - 'flavor': { - 'id': 'FAKE_FLAVOR', - }, - 'id': 'FAKE_ID', - 'image': {}, - 'attached_volumes': [], - 'security_groups': [{'name': 'default'}], - } - cc.server_get.return_value = nova_server - cc.flavor_find.return_value = mock.Mock(id='FAKE_FLAVOR') - res = profile.do_get_details(node_obj) - expected = { - 'flavor': 'FAKE_FLAVOR', - 'id': 'FAKE_ID', - 'image': {}, - 'attached_volumes': [], - 'addresses': { - 'private': [{ - 'version': 4, - 'addr': '10.0.0.3', - }] - }, - 'security_groups': 'default', - } - self.assertEqual(expected, res) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_get_details_with_more_network_or_sg(self): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - # Test normal path - nova_server = mock.Mock() - data = { - 'addresses': { - 'private': [{ - 'version': 4, - 'addr': '10.0.0.3', - }, { - 'version': 4, - 'addr': '192.168.43.3' - }], - 'public': [{ - 'version': 4, - 'addr': '172.16.5.3', - }] - }, - 'flavor': { - 'id': 'FAKE_FLAVOR', - }, - 'id': 'FAKE_ID', - 'image': { - 'id': 'FAKE_IMAGE', - }, - 'attached_volumes': [{ - 'id': 'FAKE_VOLUME', - }], - 'security_groups': [{ - 'name': 'default', - }, { - 'name': 'webserver', - }], - } - nova_server.to_dict.return_value = data - cc.server_get.return_value = nova_server - - res = profile.do_get_details(node_obj) - - self.assertEqual(set(data['addresses']), set(res['addresses'])) - self.assertEqual(set(['default', 'webserver']), - set(res['security_groups'])) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_get_details_no_physical_id(self): - # Test path for server not created - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id='') - self.assertEqual({}, profile.do_get_details(node_obj)) - - node_obj.physical_id = None - self.assertEqual({}, profile.do_get_details(node_obj)) - - def test_do_get_details_server_not_found(self): - # Test path 
for server not created - cc = mock.Mock() - err = exc.InternalError(code=404, message='No Server found for ID') - cc.server_get.side_effect = err - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_get_details(node_obj) - expected = { - 'Error': { - 'message': 'No Server found for ID', - 'code': 404 - } - } - self.assertEqual(expected, res) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_adopt(self): - profile = server.ServerProfile('t', self.spec) - x_server = mock.Mock( - disk_config="", - availability_zone="AZ01", - block_device_mapping={"foo": "bar"}, - has_config_drive=False, - flavor={"id": "FLAVOR_ID"}, - image={"id": "IMAGE_ID"}, - key_name="FAKE_KEY", - metadata={ - "mkey": "mvalue", - "cluster_id": "CLUSTER_ID", - "cluster_node_id": "NODE_ID", - "cluster_node_index": 123 - }, - addresses={ - "NET1": [{ - "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:09:6f:d8", - "OS-EXT-IPS:type": "fixed", - "addr": "ADDR1_IPv4", - "version": 4 - }, { - "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:09:6f:d8", - "OS-EXT-IPS:type": "fixed", - "addr": "ADDR1_IPv6", - "version": 6 - }], - "NET2": [{ - "OS-EXT-IPS-MAC:mac_addr": "aa:e6:3e:09:6f:db", - "OS-EXT-IPS:type": "fixed", - "addr": "ADDR2_IPv4", - "version": 4 - }, { - "OS-EXT-IPS-MAC:mac_addr": "aa:e6:3e:09:6f:db", - "OS-EXT-IPS:type": "fixed", - "addr": "ADDR2_IPv6", - "version": 6 - }], - }, - security_groups=[{'name': 'GROUP1'}, {'name': 'GROUP2'}] - ) - x_server.name = "FAKE_NAME" - cc = mock.Mock() - cc.server_get.return_value = x_server - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_adopt(node_obj) - - self.assertEqual(False, res['auto_disk_config']) - self.assertEqual('AZ01', res['availability_zone']) - self.assertEqual({'foo': 'bar'}, res['block_device_mapping_v2']) - self.assertFalse(res['config_drive']) - self.assertEqual('FLAVOR_ID', res['flavor']) - self.assertEqual('IMAGE_ID', res['image']) - self.assertEqual('FAKE_KEY', res['key_name']) - self.assertEqual({'mkey': 'mvalue'}, res['metadata']) - - self.assertEqual(2, len(res['networks'])) - self.assertIn({'network': 'NET1'}, res['networks']) - self.assertIn({'network': 'NET2'}, res['networks']) - self.assertIn('GROUP1', res['security_groups']) - self.assertIn('GROUP2', res['security_groups']) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_adopt_failed_get(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - err = exc.InternalError(code=404, message='No Server found for ID') - cc.server_get.side_effect = err - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_adopt(node_obj) - - expected = { - 'Error': { - 'code': 404, - 'message': 'No Server found for ID', - } - } - self.assertEqual(expected, res) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_adopt_with_overrides(self): - profile = server.ServerProfile('t', self.spec) - x_server = mock.Mock( - disk_config="", - availability_zone="AZ01", - block_device_mapping={"foo": "bar"}, - has_config_drive=False, - flavor={"id": "FLAVOR_ID"}, - image={"id": "IMAGE_ID"}, - key_name="FAKE_KEY", - metadata={ - "mkey": "mvalue", - "cluster_id": "CLUSTER_ID", - "cluster_node_id": "NODE_ID", - "cluster_node_index": 123 - }, - addresses={ - "NET1": [{ - "OS-EXT-IPS:type": "fixed", - }], - "NET2": [{ - "OS-EXT-IPS:type": "fixed", - }], - }, - security_groups=[{'name': 'GROUP1'}, {'name': 'GROUP2'}] - ) 
- x_server.name = "FAKE_NAME" - cc = mock.Mock() - cc.server_get.return_value = x_server - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - overrides = { - 'networks': [{"network": "NET3"}] - } - - res = profile.do_adopt(node_obj, overrides=overrides) - - self.assertEqual(False, res['auto_disk_config']) - self.assertEqual('AZ01', res['availability_zone']) - self.assertEqual({'foo': 'bar'}, res['block_device_mapping_v2']) - self.assertFalse(res['config_drive']) - self.assertEqual('FLAVOR_ID', res['flavor']) - self.assertEqual('IMAGE_ID', res['image']) - self.assertEqual('FAKE_KEY', res['key_name']) - self.assertEqual({'mkey': 'mvalue'}, res['metadata']) - self.assertIn({'network': 'NET3'}, res['networks']) - self.assertNotIn({'network': 'NET1'}, res['networks']) - self.assertNotIn({'network': 'NET2'}, res['networks']) - self.assertIn('GROUP1', res['security_groups']) - self.assertIn('GROUP2', res['security_groups']) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_join_successful(self): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - metadata = {} - cc.server_metadata_get.return_value = metadata - profile._computeclient = cc - - node_obj = mock.Mock(physical_id='FAKE_ID', index='123') - res = profile.do_join(node_obj, 'FAKE_CLUSTER_ID') - self.assertTrue(res) - - meta = {'cluster_id': 'FAKE_CLUSTER_ID', - 'cluster_node_index': '123'} - cc.server_metadata_update.assert_called_once_with( - 'FAKE_ID', meta) - - def test_do_join_server_not_created(self): - # Test path where server not specified - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id=None) - - res = profile.do_join(node_obj, 'FAKE_CLUSTER_ID') - - self.assertFalse(res) - - def test_do_leave_successful(self): - # Test normal path - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_leave(node_obj) - - self.assertTrue(res) - cc.server_metadata_delete.assert_called_once_with( - 'FAKE_ID', ['cluster_id', 'cluster_node_index']) - - def test_do_leave_no_physical_id(self): - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id=None) - - res = profile.do_leave(node_obj) - - self.assertFalse(res) - - def test_do_check(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_get.return_value = None - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_check(test_server) - cc.server_get.assert_called_once_with('FAKE_ID') - self.assertFalse(res) - - return_server = mock.Mock() - return_server.status = 'ACTIVE' - cc.server_get.return_value = return_server - res = profile.do_check(test_server) - cc.server_get.assert_called_with('FAKE_ID') - self.assertTrue(res) - - def test_do_check_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res = profile.do_check(obj) - - self.assertFalse(res) - - def test_do_check_no_server(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - err = exc.InternalError(code=404, message='No Server found') - cc.server_get.side_effect = err - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EServerNotFound, - profile.do_check, - node_obj) - - self.assertEqual("Failed in found server 'FAKE_ID': " - "No Server found.", - str(ex)) - 
cc.server_get.assert_called_once_with('FAKE_ID') - - def test_do_healthcheck_active(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status='ACTIVE') - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, consts.NODE_STATUS_POLLING) - cc.server_get.assert_called_once_with('FAKE_ID') - self.assertTrue(res) - - def test_do_healthcheck_empty_server_obj(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_get.return_value = None - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, consts.NODE_STATUS_POLLING) - cc.server_get.assert_called_once_with('FAKE_ID') - self.assertTrue(res) - - def test_do_healthcheck_exception(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - ex = exc.InternalError(code=503, message='Error') - cc.server_get.side_effect = ex - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, consts.NODE_STATUS_POLLING) - - cc.server_get.assert_called_once_with('FAKE_ID') - self.assertTrue(res) - - def test_do_healthcheck_error(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status='ERROR') - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, consts.NODE_STATUS_POLLING) - - cc.server_get.assert_called_once_with('FAKE_ID') - self.assertFalse(res) - - def test_do_healthcheck_server_not_found(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - ex = exc.InternalError(code=404, message='No Server found') - cc.server_get.side_effect = ex - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, consts.NODE_STATUS_POLLING) - - cc.server_get.assert_called_once_with('FAKE_ID') - self.assertFalse(res) - - def test_do_healthcheck_empty_hv_name(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.hypervisor_find.return_value = None - cc.server_get.return_value = mock.Mock(hypervisor_hostname='') - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, - consts.HYPERVISOR_STATUS_POLLING) - cc.server_get.assert_called_once_with('FAKE_ID') - cc.hypervisor_find.assert_not_called() - self.assertTrue(res) - - def test_do_healthcheck_empty_hv_obj(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.hypervisor_find.return_value = None - cc.server_get.return_value = mock.Mock(hypervisor_hostname='FAKE_HV') - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, - consts.HYPERVISOR_STATUS_POLLING) - cc.server_get.assert_called_once_with('FAKE_ID') - cc.hypervisor_find.assert_called_once_with('FAKE_HV') - self.assertTrue(res) - - def test_do_healthcheck_hv_exception(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(hypervisor_hostname='FAKE_HV') - ex = exc.InternalError(code=503, message='Error') - cc.hypervisor_find.side_effect = ex - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, - 
consts.HYPERVISOR_STATUS_POLLING) - - cc.server_get.assert_called_once_with('FAKE_ID') - cc.hypervisor_find.assert_called_once_with('FAKE_HV') - self.assertTrue(res) - - def test_do_healthcheck_hv_not_found(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(hypervisor_hostname='FAKE_HV') - ex = exc.InternalError(code=404, message='No Hypervisor found') - cc.hypervisor_find.side_effect = ex - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, - consts.HYPERVISOR_STATUS_POLLING) - - cc.server_get.assert_called_once_with('FAKE_ID') - cc.hypervisor_find.assert_called_once_with('FAKE_HV') - self.assertFalse(res) - - def test_do_healthcheck_hv_down(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(hypervisor_hostname='FAKE_HV') - cc.hypervisor_find.return_value = mock.Mock(state='down') - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, - consts.HYPERVISOR_STATUS_POLLING) - - cc.server_get.assert_called_once_with('FAKE_ID') - cc.hypervisor_find.assert_called_once_with('FAKE_HV') - self.assertFalse(res) - - def test_do_healthcheck_hv_disabled(self): - profile = server.ServerProfile('t', self.spec) - - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(hypervisor_hostname='FAKE_HV') - cc.hypervisor_find.return_value = mock.Mock(status='disabled') - profile._computeclient = cc - - test_server = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_healthcheck(test_server, - consts.HYPERVISOR_STATUS_POLLING) - - cc.server_get.assert_called_once_with('FAKE_ID') - cc.hypervisor_find.assert_called_once_with('FAKE_HV') - self.assertFalse(res) - - @mock.patch.object(server.ServerProfile, 'do_delete') - @mock.patch.object(server.ServerProfile, 'do_create') - def test_do_recover_operation_is_none(self, mock_create, mock_delete): - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id='FAKE_ID') - - mock_delete.return_value = None - mock_create.return_value = True - - res = profile.do_recover(node_obj, operation=None) - - self.assertTrue(res) - mock_delete.assert_called_once_with(node_obj, force=False, - timeout=None) - mock_create.assert_called_once_with(node_obj) - - @mock.patch.object(server.ServerProfile, 'handle_rebuild') - def test_do_recover_rebuild(self, mock_rebuild): - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_recover(node_obj, operation='REBUILD') - - self.assertEqual(mock_rebuild.return_value, res) - mock_rebuild.assert_called_once_with(node_obj) - - @mock.patch.object(server.ServerProfile, 'handle_rebuild') - def test_do_recover_with_list(self, mock_rebuild): - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_recover(node_obj, operation='REBUILD') - - self.assertEqual(mock_rebuild.return_value, res) - mock_rebuild.assert_called_once_with(node_obj) - - @mock.patch.object(server.ServerProfile, 'handle_reboot') - def test_do_recover_reboot(self, mock_reboot): - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_recover(node_obj, operation='REBOOT') - - self.assertTrue(res) - self.assertEqual(mock_reboot.return_value, res) - mock_reboot.assert_called_once_with(node_obj, type='HARD') - - 
@mock.patch.object(profiles_base.Profile, 'do_recover') - def test_do_recover_bad_operation(self, mock_base_recover): - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id='FAKE_ID') - - res, status = profile.do_recover(node_obj, - operation='BLAHBLAH') - - self.assertFalse(status) - - @mock.patch.object(profiles_base.Profile, 'do_recover') - def test_do_recover_fallback(self, mock_base_recover): - profile = server.ServerProfile('t', self.spec) - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.do_recover(node_obj, operation='RECREATE') - - self.assertEqual(mock_base_recover.return_value, res) - mock_base_recover.assert_called_once_with( - node_obj, operation='RECREATE') - - def test_handle_reboot(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - cc.server_reboot = mock.Mock() - cc.wait_for_server = mock.Mock() - profile._computeclient = cc - - # do it - res = profile.handle_reboot(obj, type='SOFT') - - self.assertTrue(res) - cc.server_reboot.assert_called_once_with('FAKE_ID', 'SOFT') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_reboot_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res, status = profile.handle_reboot(obj, type='SOFT') - - self.assertFalse(status) - - def test_handle_reboot_default_type(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - cc.server_reboot = mock.Mock() - cc.wait_for_server = mock.Mock() - profile._computeclient = cc - - # do it - res = profile.handle_reboot(obj) - - self.assertTrue(res) - cc.server_reboot.assert_called_once_with('FAKE_ID', 'SOFT') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_reboot_bad_type(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res, status = profile.handle_reboot(obj, type=['foo']) - self.assertFalse(status) - - res, status = profile.handle_reboot(obj, type='foo') - self.assertFalse(status) - - def test_handle_rebuild_with_image(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image) - cc = mock.Mock() - cc.server_get.return_value = x_server - cc.server_rebuild.return_value = True - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = profile.handle_rebuild(node_obj) - - self.assertTrue(res) - cc.server_get.assert_called_with('FAKE_ID') - cc.server_rebuild.assert_called_once_with('FAKE_ID', '123', - 'FAKE_SERVER_NAME', - 'adminpass') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_rebuild_with_bdm(self): - bdm_v2 = [ - { - 'volume_size': 1, - 'uuid': '123', - 'source_type': 'image', - 'destination_type': 'volume', - 'boot_index': 0, - } - ] - spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAV', - 'admin_pass': 'adminpass', - 'name': 'FAKE_SERVER_NAME', - 'security_groups': ['HIGH_SECURITY_GROUP'], - 'block_device_mapping_v2': bdm_v2, - } - } - profile = server.ServerProfile('t', spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image) - cc = mock.Mock() - cc.server_get.return_value = x_server - cc.server_rebuild.return_value = True - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - res = 
profile.handle_rebuild(node_obj) - - self.assertTrue(res) - cc.server_get.assert_called_with('FAKE_ID') - cc.server_rebuild.assert_called_once_with('FAKE_ID', '123', - 'FAKE_SERVER_NAME', - 'adminpass') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_rebuild_server_not_found(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - err = exc.InternalError(code=404, message='FAKE_ID not found') - cc.server_get.side_effect = err - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_rebuild, - node_obj) - - self.assertEqual("Failed in rebuilding server 'FAKE_ID': " - "FAKE_ID not found.", - str(ex)) - cc.server_get.assert_called_once_with('FAKE_ID') - - def test_handle_rebuild_failed_rebuild(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image) - cc = mock.Mock() - cc.server_get.return_value = x_server - ex = exc.InternalError(code=500, message='cannot rebuild') - cc.server_rebuild.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_rebuild, - node_obj) - - self.assertEqual("Failed in rebuilding server 'FAKE_ID': " - "cannot rebuild.", - str(ex)) - cc.server_get.assert_called_once_with('FAKE_ID') - cc.server_rebuild.assert_called_once_with('FAKE_ID', '123', - 'FAKE_SERVER_NAME', - 'adminpass') - self.assertEqual(0, cc.wait_for_server.call_count) - - def test_handle_rebuild_failed_waiting(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image) - cc = mock.Mock() - cc.server_get.return_value = x_server - ex = exc.InternalError(code=500, message='timeout') - cc.wait_for_server.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_rebuild, - node_obj) - - self.assertEqual("Failed in rebuilding server 'FAKE_ID': " - "timeout.", str(ex)) - cc.server_get.assert_called_once_with('FAKE_ID') - cc.server_rebuild.assert_called_once_with('FAKE_ID', '123', - 'FAKE_SERVER_NAME', - 'adminpass') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_rebuild_failed_retrieving_server(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - cc.server_get.return_value = None - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - res, status = profile.handle_rebuild(node_obj) - - self.assertFalse(status) - cc.server_get.assert_called_once_with('FAKE_ID') - self.assertEqual(0, cc.server_rebuild.call_count) - self.assertEqual(0, cc.wait_for_server.call_count) - - def test_handle_rebuild_no_physical_id(self): - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - test_server = mock.Mock() - test_server.physical_id = None - - res, status = profile.handle_rebuild(test_server) - - self.assertFalse(status) - - def test_handle_rebuild_failed_with_name(self): - self.spec['properties']['name'] = None - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image) - cc = mock.Mock() - cc.server_get.return_value = x_server - ex = exc.InternalError(code=400, - message='Server name is not ' - 'a string or unicode.') - cc.server_rebuild.side_effect = ex - profile._computeclient = cc - 
node_obj = mock.Mock(physical_id='FAKE_ID') - node_obj.name = None - - ex = self.assertRaises(exc.ESchema, - profile.handle_rebuild, - node_obj) - - self.assertEqual("The value 'None' is not a valid string.", - str(ex)) - cc.server_get.assert_called_once_with('FAKE_ID') - cc.server_rebuild.assert_not_called() - self.assertEqual(0, cc.wait_for_server.call_count) - - def test_handle_change_password(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - cc.server_reboot = mock.Mock() - cc.wait_for_server = mock.Mock() - profile._computeclient = cc - - # do it - res = profile.handle_change_password(obj, admin_pass='new_pass') - - self.assertTrue(res) - cc.server_change_password.assert_called_once_with( - 'FAKE_ID', new_password='new_pass') - - def test_handle_change_password_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res = profile.handle_change_password(obj, admin_pass='new_pass') - - self.assertFalse(res) - - def test_handle_change_password_no_password(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_change_password(obj) - - self.assertFalse(res) - - def test_handle_change_password_bad_param(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_change_password(obj, admin_pass=['foo']) - self.assertFalse(res) - - res = profile.handle_change_password(obj, foo='bar') - self.assertFalse(res) - - def test_handle_suspend(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_suspend(obj) - self.assertTrue(res) - - def test_handle_suspend_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res = profile.handle_suspend(obj) - self.assertFalse(res) - - def test_handle_suspend_failed_waiting(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - ex = exc.InternalError(code=500, message='timeout') - cc.wait_for_server.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_suspend, - node_obj) - - self.assertEqual("Failed in suspend server 'FAKE_ID': " - "timeout.", str(ex)) - cc.server_suspend.assert_called_once_with('FAKE_ID') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'SUSPENDED') - - def test_handle_resume(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_resume(obj) - self.assertTrue(res) - - def test_handle_resume_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_resume(obj) - self.assertFalse(res) - - def test_handle_resume_failed_waiting(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - ex = exc.InternalError(code=500, message='timeout') - cc.wait_for_server.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_resume, - 
node_obj) - - self.assertEqual("Failed in resume server 'FAKE_ID': " - "timeout.", str(ex)) - cc.server_resume.assert_called_once_with('FAKE_ID') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_start(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_start(obj) - self.assertTrue(res) - - def test_handle_start_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res = profile.handle_start(obj) - self.assertFalse(res) - - def test_handle_start_failed_waiting(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - ex = exc.InternalError(code=500, message='timeout') - cc.wait_for_server.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_start, - node_obj) - - self.assertEqual("Failed in start server 'FAKE_ID': " - "timeout.", str(ex)) - cc.server_start.assert_called_once_with('FAKE_ID') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_stop(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_stop(obj) - self.assertTrue(res) - - def test_handle_stop_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_stop(obj) - self.assertFalse(res) - - def test_handle_stop_failed_waiting(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - ex = exc.InternalError(code=500, message='timeout') - cc.wait_for_server.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_stop, - node_obj) - - self.assertEqual("Failed in stop server 'FAKE_ID': " - "timeout.", str(ex)) - cc.server_stop.assert_called_once_with('FAKE_ID') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'SHUTOFF') - - def test_handle_lock(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_lock(obj) - self.assertTrue(res) - - def test_handle_lock_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res = profile.handle_lock(obj) - self.assertFalse(res) - - def test_handle_unlock(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_unlock(obj) - self.assertTrue(res) - - def test_handle_unlock_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_unlock(obj) - self.assertFalse(res) - - def test_handle_pause(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_pause(obj) - self.assertTrue(res) - - def test_handle_pause_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res = 
profile.handle_pause(obj)
-        self.assertFalse(res)
-
-    def test_handle_pause_failed_waiting(self):
-        profile = server.ServerProfile('t', self.spec)
-        cc = mock.Mock()
-        ex = exc.InternalError(code=500, message='timeout')
-        cc.wait_for_server.side_effect = ex
-        profile._computeclient = cc
-        node_obj = mock.Mock(physical_id='FAKE_ID')
-
-        ex = self.assertRaises(exc.EResourceOperation,
-                               profile.handle_pause,
-                               node_obj)
-
-        self.assertEqual("Failed in pause server 'FAKE_ID': "
-                         "timeout.", str(ex))
-        cc.server_pause.assert_called_once_with('FAKE_ID')
-        cc.wait_for_server.assert_called_once_with('FAKE_ID', 'PAUSED')
-
-    def test_handle_unpause(self):
-        obj = mock.Mock(physical_id='FAKE_ID')
-        profile = server.ServerProfile('t', self.spec)
-        profile._computeclient = mock.Mock()
-
-        # do it
-        res = profile.handle_unpause(obj)
-        self.assertTrue(res)
-
-    def test_handle_unpause_no_physical_id(self):
-        obj = mock.Mock(physical_id=None)
-        profile = server.ServerProfile('t', self.spec)
-
-        # do it
-        res = profile.handle_unpause(obj)
-        self.assertFalse(res)
-
-    def test_handle_unpause_failed_waiting(self):
-        profile = server.ServerProfile('t', self.spec)
-        cc = mock.Mock()
-        ex = exc.InternalError(code=500, message='timeout')
-        cc.wait_for_server.side_effect = ex
-        profile._computeclient = cc
-        node_obj = mock.Mock(physical_id='FAKE_ID')
-
-        ex = self.assertRaises(exc.EResourceOperation,
-                               profile.handle_unpause,
-                               node_obj)
-
-        self.assertEqual("Failed in unpause server 'FAKE_ID': "
-                         "timeout.", str(ex))
-        cc.server_unpause.assert_called_once_with('FAKE_ID')
-        cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE')
-
-    def test_handle_rescue(self):
-        obj = mock.Mock(physical_id='FAKE_ID')
-        profile = server.ServerProfile('t', self.spec)
-        cc = mock.Mock()
-        gc = mock.Mock()
-        profile._computeclient = cc
-        profile._glanceclient = gc
-
-        # do it
-        res = profile.handle_rescue(obj, admin_pass='new_pass',
-                                    image='FAKE_IMAGE')
-
-        self.assertTrue(res)
-        cc.server_rescue.assert_called_once_with(
-            'FAKE_ID', admin_pass='new_pass', image_ref='FAKE_IMAGE')
-        gc.image_find.assert_called_once_with('FAKE_IMAGE', False)
-
-    def test_handle_rescue_image_none(self):
-        obj = mock.Mock(physical_id='FAKE_ID')
-        profile = server.ServerProfile('t', self.spec)
-        cc = mock.Mock()
-        profile._computeclient = cc
-
-        res = profile.handle_rescue(obj, admin_pass='new_pass',
-                                    image=None)
-        self.assertFalse(res)
-
-    def test_handle_rescue_no_physical_id(self):
-        obj = mock.Mock(physical_id=None)
-        profile = server.ServerProfile('t', self.spec)
-
-        # do it
-        res = profile.handle_rescue(obj)
-        self.assertFalse(res)
-
-    def test_handle_rescue_failed_waiting(self):
-        profile = server.ServerProfile('t', self.spec)
-        cc = mock.Mock()
-        gc = mock.Mock()
-        ex = exc.InternalError(code=500, message='timeout')
-        cc.wait_for_server.side_effect = ex
-        profile._computeclient = cc
-        profile._glanceclient = gc
-        node_obj = mock.Mock(physical_id='FAKE_ID')
-
-        ex = self.assertRaises(exc.EResourceOperation,
-                               profile.handle_rescue,
-                               node_obj, admin_pass='new_pass',
-                               image='FAKE_IMAGE')
-
-        self.assertEqual("Failed in rescue server 'FAKE_ID': "
-                         "timeout.", str(ex))
-        cc.server_rescue.assert_called_once_with('FAKE_ID',
-                                                 admin_pass='new_pass',
-                                                 image_ref='FAKE_IMAGE')
-        cc.wait_for_server.assert_called_once_with('FAKE_ID', 'RESCUE')
-        gc.image_find.assert_called_once_with('FAKE_IMAGE', False)
-
-    def test_handle_unrescue(self):
-        obj = mock.Mock(physical_id='FAKE_ID')
-        profile = server.ServerProfile('t', self.spec)
profile._computeclient = mock.Mock() - - # do it - res = profile.handle_unrescue(obj) - self.assertTrue(res) - - def test_handle_unrescue_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res = profile.handle_unrescue(obj) - self.assertFalse(res) - - def test_handle_unrescue_failed_waiting(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - ex = exc.InternalError(code=500, message='timeout') - cc.wait_for_server.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_unrescue, - node_obj) - - self.assertEqual("Failed in unrescue server 'FAKE_ID': " - "timeout.", str(ex)) - cc.server_unrescue.assert_called_once_with('FAKE_ID') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_migrate(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_migrate(obj) - self.assertTrue(res) - - def test_handle_migrate_no_physical_id(self): - obj = mock.Mock(physical_id=None) - profile = server.ServerProfile('t', self.spec) - - # do it - res = profile.handle_migrate(obj) - self.assertFalse(res) - - def test_handle_migrate_failed_waiting(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - ex = exc.InternalError(code=500, message='timeout') - cc.wait_for_server.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_migrate, - node_obj) - - self.assertEqual("Failed in migrate server 'FAKE_ID': " - "timeout.", str(ex)) - cc.server_migrate.assert_called_once_with('FAKE_ID') - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_snapshot(self): - obj = mock.Mock(physical_id='FAKE_ID', name='NODE001') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - - # do it - res = profile.handle_snapshot(obj) - self.assertTrue(res) - - def test_handle_snapshot_no_physical_id(self): - obj = mock.Mock(physical_id=None, name='NODE001') - profile = server.ServerProfile('t', self.spec) - - # do it - res = profile.handle_snapshot(obj) - self.assertFalse(res) - - def test_handle_snapshot_failed_waiting(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock(name='NODE001') - ex = exc.InternalError(code=500, message='timeout') - cc.wait_for_server.side_effect = ex - profile._computeclient = cc - node_obj = mock.Mock(physical_id='FAKE_ID', name='NODE001') - - ex = self.assertRaises(exc.EResourceOperation, - profile.handle_snapshot, - node_obj) - - self.assertEqual("Failed in snapshot server 'FAKE_ID': " - "timeout.", str(ex)) - cc.wait_for_server.assert_called_once_with('FAKE_ID', 'ACTIVE') - - def test_handle_restore(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - profile._computeclient = cc - - # do it - res = profile.handle_restore(obj, admin_pass='new_pass', - image='FAKE_IMAGE') - - self.assertTrue(res) - - def test_handle_restore_image_none(self): - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - profile._computeclient = cc - - res = profile.handle_restore(obj, admin_pass='new_pass', - image=None) - self.assertFalse(res) diff --git
a/senlin/tests/unit/profiles/test_nova_server_update.py b/senlin/tests/unit/profiles/test_nova_server_update.py deleted file mode 100644 index 603b55790..000000000 --- a/senlin/tests/unit/profiles/test_nova_server_update.py +++ /dev/null @@ -1,1699 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -from unittest import mock - -from senlin.common import consts -from senlin.common import exception as exc -from senlin.objects import node as node_obj -from senlin.profiles.os.nova import server -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class TestServerNameChecking(base.SenlinTestCase): - - scenarios = [ - ('none-none', dict( - old_name=None, - new_name=None, - result=(False, 'NODE_NAME'))), - ('none-new', dict( - old_name=None, - new_name='NEW_NAME', - result=(True, 'NEW_NAME'))), - ('old-none', dict( - old_name='OLD_NAME', - new_name=None, - result=(True, 'NODE_NAME'))), - ('old-new', dict( - old_name='OLD_NAME', - new_name='NEW_NAME', - result=(True, 'NEW_NAME'))) - ] - - def setUp(self): - super(TestServerNameChecking, self).setUp() - self.old_spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAVOR', - } - } - self.new_spec = copy.deepcopy(self.old_spec) - obj = mock.Mock() - obj.name = 'NODE_NAME' - self.obj = obj - - def test_check_server_name(self): - if self.old_name: - self.old_spec['properties']['name'] = self.old_name - if self.new_name: - self.new_spec['properties']['name'] = self.new_name - - profile = server.ServerProfile('t', self.old_spec) - new_profile = server.ServerProfile('t1', self.new_spec) - - res = profile._check_server_name(self.obj, new_profile) - - self.assertEqual(self.result, res) - - -class TestPasswordChecking(base.SenlinTestCase): - - scenarios = [ - ('none-none', dict( - old_passwd=None, - new_passwd=None, - result=(False, ''))), - ('none-new', dict( - old_passwd=None, - new_passwd='NEW_PASSWD', - result=(True, 'NEW_PASSWD'))), - ('old-none', dict( - old_passwd='OLD_PASSWD', - new_passwd=None, - result=(True, ''))), - ('old-new', dict( - old_passwd='OLD_PASSWD', - new_passwd='NEW_PASSWD', - result=(True, 'NEW_PASSWD'))) - ] - - def setUp(self): - super(TestPasswordChecking, self).setUp() - self.old_spec = { - 'type': 'os.nova.server', - 'version': '1.0', - 'properties': { - 'flavor': 'FLAVOR', - } - } - self.new_spec = copy.deepcopy(self.old_spec) - self.obj = mock.Mock() - - def test_check_password(self): - if self.old_passwd: - self.old_spec['properties']['admin_pass'] = self.old_passwd - if self.new_passwd: - self.new_spec['properties']['admin_pass'] = self.new_passwd - - profile = server.ServerProfile('t', self.old_spec) - new_profile = server.ServerProfile('t1', self.new_spec) - - res = profile._check_password(self.obj, new_profile) - - self.assertEqual(self.result, res) - - -class TestNovaServerUpdate(base.SenlinTestCase): - def setUp(self): - super(TestNovaServerUpdate, self).setUp() - - self.context = utils.dummy_context() - self.spec = { - 'type': 
'os.nova.server', - 'version': '1.0', - 'properties': { - 'context': {}, - 'admin_pass': 'adminpass', - 'auto_disk_config': True, - 'availability_zone': 'FAKE_AZ', - 'block_device_mapping': [{ - 'device_name': 'FAKE_NAME', - 'volume_size': 1000, - }], - 'config_drive': False, - 'flavor': 'FLAV', - 'image': 'FAKE_IMAGE', - 'key_name': 'FAKE_KEYNAME', - "metadata": {"meta var": "meta val"}, - 'name': 'FAKE_SERVER_NAME', - 'networks': [{ - 'port': 'FAKE_PORT', - 'fixed_ip': 'FAKE_IP', - 'network': 'FAKE_NET', - }], - 'personality': [{ - 'path': '/etc/motd', - 'contents': 'foo', - }], - 'scheduler_hints': { - 'same_host': 'HOST_ID', - }, - 'security_groups': ['HIGH_SECURITY_GROUP'], - 'user_data': 'FAKE_USER_DATA', - } - } - self.patchobject(node_obj.Node, 'update') - - def test_update_name(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - profile._computeclient = cc - obj = mock.Mock(physical_id='NOVA_ID') - - res = profile._update_name(obj, 'NEW_NAME') - - self.assertIsNone(res) - cc.server_update.assert_called_once_with('NOVA_ID', name='NEW_NAME') - - def test_update_name_nova_failure(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - profile._computeclient = cc - cc.server_update.side_effect = exc.InternalError(message='BOOM') - obj = mock.Mock(physical_id='NOVA_ID') - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_name, - obj, 'NEW_NAME') - - self.assertEqual("Failed in updating server 'NOVA_ID': BOOM.", - str(ex)) - cc.server_update.assert_called_once_with('NOVA_ID', name='NEW_NAME') - - def test_update_password(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - profile._computeclient = cc - obj = mock.Mock(physical_id='NOVA_ID') - - res = profile._update_password(obj, 'NEW_PASSWORD') - - self.assertIsNone(res) - cc.server_change_password.assert_called_once_with( - 'NOVA_ID', 'NEW_PASSWORD') - - def test_update_password_nova_failure(self): - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - profile._computeclient = cc - err = exc.InternalError(message='BOOM') - cc.server_change_password.side_effect = err - obj = mock.Mock(physical_id='NOVA_ID') - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_password, - obj, 'NEW_PASSWORD') - - self.assertEqual("Failed in updating server 'NOVA_ID': BOOM.", - str(ex)) - cc.server_change_password.assert_called_once_with( - 'NOVA_ID', 'NEW_PASSWORD') - - def test_update_metadata(self): - obj = mock.Mock(id='NODE_ID', physical_id='NOVA_ID', - cluster_id='CLUSTER_ID', index=456) - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['metadata'] = {'new_key': 'new_value'} - new_profile = server.ServerProfile('t', new_spec) - - res = profile._update_metadata(obj, new_profile) - - self.assertIsNone(res) - cc.server_metadata_update.assert_called_once_with( - 'NOVA_ID', - { - 'new_key': 'new_value', - 'cluster_node_id': 'NODE_ID', - 'cluster_id': 'CLUSTER_ID', - 'cluster_node_index': '456', - } - ) - - def test__update_metadata_no_change(self): - obj = mock.Mock(id='NODE_ID') - profile = server.ServerProfile('t', self.spec) - cc = mock.Mock() - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_profile = server.ServerProfile('t', new_spec) - - res = profile._update_metadata(obj, new_profile) - - self.assertIsNone(res) - self.assertEqual(0, cc.server_metadata_update.call_count) - - def 
test_update_metadata_nova_failure(self): - obj = mock.Mock(id='NODE_ID', physical_id='NOVA_ID', cluster_id='') - err = exc.InternalError(code=500, message='Nova Error') - cc = mock.Mock() - cc.server_metadata_update.side_effect = err - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - - # new profile with new metadata - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['metadata'] = {'fooa': 'baaar'} - new_profile = server.ServerProfile('t', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_metadata, - obj, new_profile) - - self.assertEqual("Failed in updating server 'NOVA_ID': " - "Nova Error.", str(ex)) - cc.server_metadata_update.assert_called_once_with( - 'NOVA_ID', {'fooa': 'baaar', 'cluster_node_id': 'NODE_ID'} - ) - - def test_update_flavor(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_ACTIVE) - profile = server.ServerProfile('t', self.spec) - profile.stop_timeout = 123 - profile._computeclient = cc - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - profile._update_flavor(obj, new_profile) - - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update') - ]) - cc.server_resize.assert_called_once_with('NOVA_ID', '456') - cc.server_resize_confirm.assert_called_once_with('NOVA_ID') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, - timeout=profile.stop_timeout), - mock.call('NOVA_ID', 'VERIFY_RESIZE'), - mock.call('NOVA_ID', consts.VS_SHUTOFF)]) - - # update flavor on server that is already stopped - def test_update_flavor_stopped_server(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_SHUTOFF) - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - profile._update_flavor(obj, new_profile) - - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update') - ]) - cc.server_resize.assert_called_once_with('NOVA_ID', '456') - cc.server_resize_confirm.assert_called_once_with('NOVA_ID') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', 'VERIFY_RESIZE'), - mock.call('NOVA_ID', consts.VS_SHUTOFF)]) - - def test_update_flavor_failed_validation(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - err = exc.EResourceUpdate(type='server', id='NOVA_ID', message='BOOM') - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=err) - - self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - mock_validate.assert_called_once_with(obj, 'FLAV', 'update') - - def test_update_flavor_failed_validation_2(self): - obj = 
mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - result = [ - mock.Mock(), - exc.EResourceUpdate(type='server', id='NOVA_ID', message='BOOM') - ] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=result) - - self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - - def test_update_flavor_same(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_profile = server.ServerProfile('t1', new_spec) - - x_flavors = [mock.Mock(id=123), mock.Mock(id=123)] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - - res = profile._update_flavor(obj, new_profile) - - self.assertFalse(res) - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'FLAV', 'update'), - ]) - self.assertEqual(0, cc.server_resize.call_count) - - def test_update_flavor_server_stop_failed(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_ACTIVE) - cc.server_stop.side_effect = [ - exc.InternalError(code=500, message='Stop failed')] - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - cc.server_resize.assert_not_called() - cc.server_resize_revert.assert_not_called() - cc.wait_for_server.assert_not_called() - self.assertEqual("Failed in updating server 'NOVA_ID': Stop " - "failed.", str(ex)) - - def test_update_flavor_server_paused(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_PAUSED) - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - cc.server_resize.assert_not_called() - cc.server_resize_revert.assert_not_called() - cc.wait_for_server.assert_not_called() - self.assertEqual("Failed in updating server 'NOVA_ID': Server needs " - "to be ACTIVE or STOPPED in order to update flavor.", - str(ex)) - - def test_update_flavor_resize_failed(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.side_effect = [ - 
mock.Mock(status=consts.VS_ACTIVE), - mock.Mock(status='RESIZE')] - cc.server_resize.side_effect = [ - exc.InternalError(code=500, message='Resize failed')] - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - cc.server_resize.assert_called_once_with('NOVA_ID', '456') - cc.server_resize_revert.assert_called_once_with('NOVA_ID') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, timeout=600), - mock.call('NOVA_ID', consts.VS_SHUTOFF), - mock.call('NOVA_ID', consts.VS_ACTIVE) - ]) - self.assertEqual("Failed in updating server 'NOVA_ID': Resize " - "failed.", str(ex)) - - def test_update_flavor_first_wait_for_server_failed(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_ACTIVE) - cc.wait_for_server.side_effect = [ - exc.InternalError(code=500, message='TIMEOUT') - ] - - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - # do it - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - # assertions - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - cc.server_resize.assert_not_called() - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, timeout=600)]) - self.assertEqual("Failed in updating server 'NOVA_ID': " - "TIMEOUT.", str(ex)) - - def test_update_flavor_second_wait_for_server_failed(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.side_effect = [ - mock.Mock(status=consts.VS_ACTIVE), - mock.Mock(status='RESIZE')] - cc.wait_for_server.side_effect = [ - None, - exc.InternalError(code=500, message='TIMEOUT'), - None, - None - ] - - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - # do it - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - # assertions - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - cc.server_resize.assert_called_once_with('NOVA_ID', '456') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, timeout=600), - mock.call('NOVA_ID', 'VERIFY_RESIZE'), - mock.call('NOVA_ID', consts.VS_SHUTOFF), - mock.call('NOVA_ID', consts.VS_ACTIVE), - ]) - cc.server_resize_revert.assert_called_once_with('NOVA_ID') - self.assertEqual("Failed 
in updating server 'NOVA_ID': " - "TIMEOUT.", str(ex)) - - def test_update_flavor_resize_failed_revert_failed(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.side_effect = [ - mock.Mock(status=consts.VS_ACTIVE), - mock.Mock(status='RESIZE')] - err_resize = exc.InternalError(code=500, message='Resize') - cc.server_resize.side_effect = err_resize - err_revert = exc.InternalError(code=500, message='Revert') - cc.server_resize_revert.side_effect = err_revert - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - - # do it - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - # assertions - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - cc.server_resize.assert_called_once_with('NOVA_ID', '456') - cc.server_resize_revert.assert_called_once_with('NOVA_ID') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, timeout=600), - ]) - self.assertEqual("Failed in updating server 'NOVA_ID': " - "Revert.", str(ex)) - - def test_update_flavor_confirm_failed(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_ACTIVE) - err_confirm = exc.InternalError(code=500, message='Confirm') - cc.server_resize_confirm.side_effect = err_confirm - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - - # do it - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_flavor, - obj, new_profile) - - # assertions - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - cc.server_resize.assert_called_once_with('NOVA_ID', '456') - cc.server_resize_confirm.assert_called_once_with('NOVA_ID') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, timeout=600), - mock.call('NOVA_ID', 'VERIFY_RESIZE'), - ]) - self.assertEqual("Failed in updating server 'NOVA_ID': Confirm.", - str(ex)) - - def test_update_flavor_wait_confirm_failed(self): - obj = mock.Mock(physical_id='NOVA_ID') - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_SHUTOFF) - err_wait = exc.InternalError(code=500, message='Wait') - cc.wait_for_server.side_effect = [None, err_wait] - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'new_flavor' - new_profile = server.ServerProfile('t1', new_spec) - x_flavors = [mock.Mock(id='123'), mock.Mock(id='456')] - mock_validate = self.patchobject(profile, '_validate_flavor', - side_effect=x_flavors) - - # do it - ex = self.assertRaises(exc.InternalError, - profile._update_flavor, - obj, new_profile) - - # assertions - mock_validate.assert_has_calls([ - mock.call(obj, 'FLAV', 'update'), - mock.call(obj, 'new_flavor', 'update'), - ]) - 
cc.server_resize.assert_called_once_with('NOVA_ID', '456') - cc.server_resize_confirm.assert_called_once_with('NOVA_ID') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', 'VERIFY_RESIZE'), - mock.call('NOVA_ID', consts.VS_SHUTOFF) - ]) - self.assertEqual("Failed in updating server 'NOVA_ID': Wait.", - str(ex)) - - def test_update_image(self): - profile = server.ServerProfile('t', self.spec) - profile.stop_timeout = 123 - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image, status=consts.VS_ACTIVE) - cc = mock.Mock() - cc.server_get.return_value = x_server - profile._computeclient = cc - x_new_image = mock.Mock(id='456') - x_images = [x_new_image] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=x_images) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - profile._update_image(obj, new_profile, 'new_name', 'new_pass') - - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - cc.server_rebuild.assert_called_once_with( - 'NOVA_ID', '456', 'new_name', 'new_pass') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, - timeout=profile.stop_timeout), - mock.call('NOVA_ID', consts.VS_SHUTOFF), - ]) - - def test_update_image_server_stopped(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image, status=consts.VS_SHUTOFF) - cc = mock.Mock() - cc.server_get.return_value = x_server - profile._computeclient = cc - x_new_image = mock.Mock(id='456') - x_images = [x_new_image] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=x_images) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - profile._update_image(obj, new_profile, 'new_name', 'new_pass') - - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - cc.server_rebuild.assert_called_once_with( - 'NOVA_ID', '456', 'new_name', 'new_pass') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF), - ]) - - def test_update_image_server_paused(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image, status=consts.VS_PAUSED) - cc = mock.Mock() - cc.server_get.return_value = x_server - profile._computeclient = cc - x_new_image = mock.Mock(id='456') - x_images = [x_new_image] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=x_images) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 'new_name', '') - - msg = ("Failed in updating server 'NOVA_ID': Server needs to be ACTIVE" - " or STOPPED in order to update image.") - self.assertEqual(msg, str(ex)) - - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - cc.server_rebuild.assert_not_called() - cc.wait_for_server.assert_not_called() - - def test_update_image_new_image_is_none(self): - profile = server.ServerProfile('t', self.spec) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - del new_spec['properties']['image'] - new_profile = 
server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 'new_name', '') - - msg = ("Failed in updating server 'NOVA_ID': Updating Nova server" - " with image set to None is not supported by Nova.") - self.assertEqual(msg, str(ex)) - - def test_update_image_new_image_invalid(self): - # NOTE: An invalid image could be caused by a non-existent image or - # a compute driver failure - profile = server.ServerProfile('t', self.spec) - # _validate_image will always throw EResourceUpdate if driver fails - err = exc.EResourceUpdate(type='server', id='NOVA_ID', message='BAD') - mock_check = self.patchobject(profile, '_validate_image', - side_effect=err) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 'new_name', 'new_pass') - - msg = ("Failed in updating server 'NOVA_ID': BAD.") - self.assertEqual(msg, str(ex)) - mock_check.assert_called_once_with(obj, 'new_image', reason='update') - - def test_update_image_old_image_invalid(self): - # NOTE: An invalid image could be caused by a non-existent image or - # a compute driver failure - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image) - cc = mock.Mock() - cc.server_get.return_value = x_server - profile._computeclient = cc - # _validate_image will always throw EResourceUpdate if driver fails - results = [ - exc.EResourceUpdate(type='server', id='NOVA_ID', message='BAD') - ] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=results) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 'new_name', 'new_pass') - - msg = ("Failed in updating server 'NOVA_ID': BAD.") - self.assertEqual(msg, str(ex)) - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - - def test_update_image_old_image_is_none_but_succeeded(self): - old_spec = copy.deepcopy(self.spec) - del old_spec['properties']['image'] - profile = server.ServerProfile('t', old_spec) - cc = mock.Mock() - profile._computeclient = cc - x_server = mock.Mock(image={'id': '123'}, status=consts.VS_ACTIVE) - cc.server_get.return_value = x_server - # this is the new one - x_image = mock.Mock(id='456') - mock_check = self.patchobject(profile, '_validate_image', - return_value=x_image) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - res = profile._update_image(obj, new_profile, 'new_name', 'new_pass') - - self.assertTrue(res) - mock_check.assert_called_once_with(obj, 'new_image', reason='update') - cc.server_get.assert_called_once_with('NOVA_ID') - cc.server_rebuild.assert_called_once_with( - 'NOVA_ID', '456', 'new_name', 'new_pass') - cc.wait_for_server.assert_has_calls([ - # first wait is from active to shutoff and has custom timeout - mock.call('NOVA_ID', consts.VS_SHUTOFF, timeout=600), - mock.call('NOVA_ID', consts.VS_SHUTOFF), - ]) - - def test_update_image_old_image_is_none_but_failed(self): - old_spec = copy.deepcopy(self.spec) - del
old_spec['properties']['image'] - profile = server.ServerProfile('t', old_spec) - cc = mock.Mock() - profile._computeclient = cc - # this is about the new one - x_image = mock.Mock(id='456') - mock_check = self.patchobject(profile, '_validate_image', - return_value=x_image) - cc.server_get.side_effect = exc.InternalError(message='DRIVER') - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 'new_name', 'new_pass') - - self.assertEqual("Failed in updating server 'NOVA_ID': DRIVER.", - str(ex)) - mock_check.assert_called_once_with(obj, 'new_image', reason='update') - cc.server_get.assert_called_once_with('NOVA_ID') - - def test_update_image_updating_to_same_image(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image) - cc = mock.Mock() - cc.server_get.return_value = x_server - profile._computeclient = cc - x_new_image = mock.Mock(id='123') - x_images = [x_new_image] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=x_images) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - res = profile._update_image(obj, new_profile, 'new_name', 'new_pass') - - self.assertFalse(res) - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - self.assertEqual(0, cc.server_rebuild.call_count) - self.assertEqual(0, cc.wait_for_server.call_count) - - def test_update_image_failed_stopping(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image) - cc = mock.Mock() - cc.server_get.return_value = x_server - cc.server_stop.side_effect = exc.InternalError(message='FAILED') - profile._computeclient = cc - x_new_image = mock.Mock(id='456') - x_images = [x_new_image] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=x_images) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 'new_name', 'new_pass') - - self.assertEqual("Failed in updating server 'NOVA_ID': Server needs to" - " be ACTIVE or STOPPED in order to update image.", - str(ex)) - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - cc.server_rebuild.assert_not_called() - cc.wait_for_server.assert_not_called() - - def test_update_image_failed_rebuilding(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image, status=consts.VS_ACTIVE) - cc = mock.Mock() - cc.server_get.return_value = x_server - cc.server_rebuild.side_effect = exc.InternalError(message='FAILED') - profile._computeclient = cc - x_new_image = mock.Mock(id='456') - x_images = [x_new_image] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=x_images) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 
'new_name', 'new_pass') - - self.assertEqual("Failed in updating server 'NOVA_ID': FAILED.", - str(ex)) - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - cc.server_rebuild.assert_called_once_with( - 'NOVA_ID', '456', 'new_name', 'new_pass') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, timeout=600), - ]) - - def test_update_image_failed_first_waiting(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image, status=consts.VS_ACTIVE) - cc = mock.Mock() - cc.server_get.return_value = x_server - cc.wait_for_server.side_effect = exc.InternalError(message='TIMEOUT') - profile._computeclient = cc - x_new_image = mock.Mock(id='456') - x_images = [x_new_image] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=x_images) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 'new_name', 'new_pass') - - self.assertEqual("Failed in updating server 'NOVA_ID': TIMEOUT.", - str(ex)) - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - cc.server_rebuild.assert_not_called() - cc.wait_for_server.assert_called_once_with( - 'NOVA_ID', consts.VS_SHUTOFF, timeout=600) - - def test_update_image_failed_second_waiting(self): - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image, status=consts.VS_ACTIVE) - cc = mock.Mock() - cc.server_get.return_value = x_server - cc.wait_for_server.side_effect = [ - None, - exc.InternalError(message='TIMEOUT')] - profile._computeclient = cc - x_new_image = mock.Mock(id='456') - x_images = [x_new_image] - mock_check = self.patchobject(profile, '_validate_image', - side_effect=x_images) - obj = mock.Mock(physical_id='NOVA_ID') - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'new_image' - new_profile = server.ServerProfile('t1', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_image, - obj, new_profile, 'new_name', 'new_pass') - - self.assertEqual("Failed in updating server 'NOVA_ID': TIMEOUT.", - str(ex)) - mock_check.assert_has_calls([ - mock.call(obj, 'new_image', reason='update'), - ]) - cc.server_rebuild.assert_called_once_with( - 'NOVA_ID', '456', 'new_name', 'new_pass') - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, timeout=600), - mock.call('NOVA_ID', consts.VS_SHUTOFF)]) - - def test_create_interfaces(self): - cc = mock.Mock() - server_obj = mock.Mock() - cc.server_get.return_value = server_obj - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - validation_results = [ - {'network': 'net1_id', 'fixed_ip': 'ip2'}, - {'network': 'net2_id'}, - {'port': 'port4'} - ] - mock_validate = self.patchobject(profile, '_validate_network', - side_effect=validation_results) - ports_results = [ - (mock.Mock( - id='port1_id', network_id='net1_id', - fixed_ips=[{'ip_address': 'ip2'}], security_group_ids=[]), - None), - (mock.Mock( - id='port2_id', network_id='net2_id', - fixed_ips=[{'ip_address': 'ip3'}], security_group_ids=[]), - None), - (mock.Mock( - id='port4_id', network_id='net3_id', - fixed_ips=[{'ip_address': 'ip4'}], security_group_ids=[]), - None) - ] - mock_get_port = self.patchobject(profile, '_get_port', - 
side_effect=ports_results) - networks = [ - {'network': 'net1', 'port': None, 'fixed_ip': 'ip2'}, - {'network': 'net2', 'port': None, 'fixed_ip': None}, - {'network': None, 'port': 'port4', 'fixed_ip': None} - ] - obj = mock.Mock(physical_id='NOVA_ID', data={}) - - res = profile._update_network_add_port(obj, networks) - - self.assertIsNone(res) - cc.server_get.assert_called_with('NOVA_ID') - validation_calls = [ - mock.call(obj, - {'network': 'net1', 'port': None, 'fixed_ip': 'ip2'}, - 'update'), - mock.call(obj, - {'network': 'net2', 'port': None, 'fixed_ip': None}, - 'update'), - mock.call(obj, - {'network': None, 'port': 'port4', 'fixed_ip': None}, - 'update') - ] - mock_validate.assert_has_calls(validation_calls) - mock_get_port.assert_called_with(obj, {'port': 'port4'}) - create_calls = [ - mock.call(server_obj, port_id='port1_id'), - mock.call(server_obj, port_id='port2_id'), - mock.call(server_obj, port_id='port4_id'), - ] - cc.server_interface_create.assert_has_calls(create_calls) - - def test_create_interfaces_failed_getting_server(self): - cc = mock.Mock() - cc.server_get.side_effect = exc.InternalError(message='Not valid') - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - self.patchobject(profile, '_create_ports_from_properties') - - obj = mock.Mock(physical_id='NOVA_ID') - networks = [{'foo': 'bar'}] # not used - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_network_add_port, - obj, networks) - - self.assertEqual("Failed in updating server 'NOVA_ID': Not valid.", - str(ex)) - cc.server_get.assert_called_once_with('NOVA_ID') - self.assertEqual(0, profile._create_ports_from_properties.call_count) - - def test_create_interfaces_failed_validation(self): - cc = mock.Mock() - server_obj = mock.Mock() - cc.server_get.return_value = server_obj - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - err = exc.EResourceUpdate(type='server', id='NOVA_ID', - message='Driver error') - mock_validate = self.patchobject(profile, '_validate_network', - side_effect=err) - networks = [{'network': 'net1', 'port': None, 'fixed_ip': 'ip2'}] - obj = mock.Mock(physical_id='NOVA_ID') - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_network_add_port, - obj, networks) - - self.assertEqual("Failed in updating server 'NOVA_ID': Driver error.", - str(ex)) - cc.server_get.assert_called_once_with('NOVA_ID') - mock_validate.assert_called_once_with(obj, networks[0], 'update') - self.assertEqual(0, cc.server_interface_create.call_count) - - def test_delete_interfaces(self): - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_ACTIVE) - nc = mock.Mock() - net1 = mock.Mock(id='net1') - nc.network_get.return_value = net1 - nc.port_find.return_value = mock.Mock(id='port3', status='DOWN') - profile = server.ServerProfile('t', self.spec) - profile.stop_timeout = 232 - profile._computeclient = cc - profile._networkclient = nc - obj = mock.Mock(physical_id='NOVA_ID', data={'internal_ports': [ - {'id': 'port1', 'network_id': 'net1', 'remove': True, - 'fixed_ips': [{'ip_address': 'ip1'}]}, - {'id': 'port2', 'network_id': 'net1', 'remove': True, - 'fixed_ips': [{'ip_address': 'ip-random2'}]}, - {'id': 'port3', 'network_id': 'net1', 'remove': True, - 'fixed_ips': [{'ip_address': 'ip3'}]}]}) - networks = [ - {'network': 'net1', 'port': None, 'fixed_ip': 'ip1'}, - {'network': 'net1', 'port': None, 'fixed_ip': None}, - {'network': None, 'port': 'port3', 'fixed_ip': None} - ] - - res = 
profile._update_network_remove_port(obj, networks) - - self.assertIsNone(res) - - nc.network_get.assert_has_calls([ - mock.call('net1'), mock.call('net1') - ]) - cc.wait_for_server.assert_has_calls([ - mock.call('NOVA_ID', consts.VS_SHUTOFF, - timeout=profile.stop_timeout), - ]) - cc.server_interface_delete.assert_has_calls([ - mock.call('port1', 'NOVA_ID'), - mock.call('port2', 'NOVA_ID'), - mock.call('port3', 'NOVA_ID'), - ]) - nc.port_delete.assert_has_calls([ - mock.call('port1', ignore_missing=True), - mock.call('port2', ignore_missing=True), - mock.call('port3', ignore_missing=True), - ]) - - def test_delete_interfaces_failed_delete(self): - cc = mock.Mock() - profile = server.ServerProfile('t', self.spec) - profile._computeclient = cc - profile._networkclient = mock.Mock() - candidate_ports = [ - [{'id': 'port1', 'network_id': 'net1', - 'fixed_ips': [{'ip_address': 'ip1'}]}], - ] - self.patchobject(profile, '_find_port_by_net_spec', - side_effect=candidate_ports) - err = exc.InternalError(message='BANG') - cc.server_interface_delete.side_effect = err - internal_ports = [ - {'id': 'port1', 'remove': True} - ] - obj = mock.Mock(physical_id='NOVA_ID', - data={'internal_ports': internal_ports}) - networks = [ - {'network': 'net1', 'port': None, 'fixed_ip': 'ip1'}, - ] - - ex = self.assertRaises(exc.EResourceUpdate, - profile._update_network_remove_port, - obj, networks) - - self.assertEqual("Failed in updating server 'NOVA_ID': BANG.", - str(ex)) - cc.server_interface_delete.assert_called_once_with('port1', 'NOVA_ID') - - def test_update_port(self): - cc = mock.Mock() - cc.server_get.return_value = mock.Mock(status=consts.VS_ACTIVE) - nc = mock.Mock() - net1 = mock.Mock(id='net1') - nc.network_get.return_value = net1 - nc.port_find.return_value = mock.Mock(id='port3', status='DOWN') - profile = server.ServerProfile('t', self.spec) - profile.stop_timeout = 232 - profile._computeclient = cc - profile._networkclient = nc - validation_results = [ - {'network': 'net1_id', 'fixed_ip': 'ip1', - 'security_groups': ['sg1_id']}, - {'network': 'net1_id', 'fixed_ip': 'ip1', - 'security_groups': ['sg1_id', 'sg2_id']}, - {'network': 'net1_id', 'fixed_ip': 'ip1'} - ] - mock_validate = self.patchobject(profile, '_validate_network', - side_effect=validation_results) - candidate_ports = [ - [{'id': 'port1_id', 'network_id': 'net1_id', - 'fixed_ips': [{'ip_address': 'ip1'}]}], - [{'id': 'port2_id', 'network_id': 'net1_id', - 'fixed_ips': [{'ip_address': 'ip1'}]}], - [{'id': 'port3_id', 'network_id': 'net1_id', - 'fixed_ips': [{'ip_address': 'ip1'}]}] - ] - self.patchobject(profile, '_find_port_by_net_spec', - side_effect=candidate_ports) - - obj = mock.Mock(physical_id='NOVA_ID', data={'internal_ports': [ - {'id': 'port1', 'network_id': 'net1', - 'fixed_ips': [{'ip_address': 'ip1'}]}, - {'id': 'port2', 'network_id': 'net1', 'remove': True, - 'fixed_ips': [{'ip_address': 'ip-random2'}], - 'security_groups': ['default']}, - {'id': 'port3', 'network_id': 'net1', 'remove': True, - 'fixed_ips': [{'ip_address': 'ip3'}], - 'security_groups': ['default']} - ]}) - networks = [ - {'network': 'net1', 'port': None, 'fixed_ip': 'ip1', - 'security_groups': ['default'], 'floating_network': None, - 'floating_ip': None}, - {'network': 'net1', 'port': None, 'fixed_ip': 'ip1', - 'security_groups': ['default', 'blah'], 'floating_network': None, - 'floating_ip': None}, - {'network': 'net1', 'port': None, 'fixed_ip': 'ip1', - 'security_groups': None, 'floating_network': None, - 'floating_ip': None}, - ] - - res = 
profile._update_network_update_port(obj, networks) - - self.assertIsNone(res) - validation_calls = [ - mock.call(obj, - {'network': 'net1', 'port': None, 'fixed_ip': 'ip1', - 'security_groups': ['default'], - 'floating_network': None, 'floating_ip': None}, - 'update'), - mock.call(obj, - {'network': 'net1', 'port': None, 'fixed_ip': 'ip1', - 'security_groups': ['default', 'blah'], - 'floating_network': None, 'floating_ip': None}, - 'update'), - mock.call(obj, - {'network': 'net1', 'port': None, 'fixed_ip': 'ip1', - 'security_groups': None, 'floating_network': None, - 'floating_ip': None}, - 'update') - ] - mock_validate.assert_has_calls(validation_calls) - update_calls = [ - mock.call('port1_id', security_groups=['sg1_id']), - mock.call('port2_id', security_groups=['sg1_id', 'sg2_id']), - mock.call('port3_id', security_groups=[]), - ] - nc.port_update.assert_has_calls(update_calls) - - @mock.patch.object(server.ServerProfile, '_update_network_update_port') - @mock.patch.object(server.ServerProfile, '_update_network_remove_port') - @mock.patch.object(server.ServerProfile, '_update_network_add_port') - def test_update_network(self, mock_create, mock_delete, mock_update): - obj = mock.Mock(physical_id='FAKE_ID') - - old_spec = copy.deepcopy(self.spec) - old_spec['properties']['networks'] = [ - {'network': 'net1', 'fixed_ip': 'ip1'}, - {'network': 'net1'}, - {'port': 'port3'}, - # sg only changes: - {'network': 'net3', 'fixed_ip': 'ip1'}, - {'network': 'net4', 'fixed_ip': 'ip1', - 'security_groups': ['blah']}, - {'port': 'port5', 'security_groups': ['default']}, - ] - profile = server.ServerProfile('t', old_spec) - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['networks'] = [ - {'network': 'net1', 'fixed_ip': 'ip2'}, - {'network': 'net2'}, - {'port': 'port4'}, - # sg only changes: - {'network': 'net3', 'fixed_ip': 'ip1', - 'security_groups': ['default']}, - {'network': 'net4', 'fixed_ip': 'ip1', - 'security_groups': ['default']}, - {'port': 'port5', 'security_groups': ['default', 'blah']}, - ] - new_profile = server.ServerProfile('t1', new_spec) - - networks_created, networks_deleted = profile._update_network( - obj, new_profile) - - self.assertTrue(networks_created) - self.assertTrue(networks_deleted) - - networks_create = [ - {'floating_network': None, 'network': 'net1', 'fixed_ip': 'ip2', - 'floating_ip': None, 'port': None, 'vnic_type': None, - 'security_groups': None, 'subnet': None}, - {'floating_network': None, 'network': 'net2', 'fixed_ip': None, - 'floating_ip': None, 'port': None, 'vnic_type': None, - 'security_groups': None, 'subnet': None}, - {'floating_network': None, 'network': None, 'fixed_ip': None, - 'floating_ip': None, 'port': 'port4', 'vnic_type': None, - 'security_groups': None, 'subnet': None} - ] - mock_create.assert_called_once_with(obj, networks_create) - networks_delete = [ - {'floating_network': None, 'network': 'net1', 'fixed_ip': 'ip1', - 'floating_ip': None, 'port': None, 'vnic_type': None, - 'security_groups': None, 'subnet': None}, - {'floating_network': None, 'network': 'net1', 'fixed_ip': None, - 'floating_ip': None, 'port': None, 'vnic_type': None, - 'security_groups': None, 'subnet': None}, - {'floating_network': None, 'network': None, 'fixed_ip': None, - 'floating_ip': None, 'port': 'port3', 'vnic_type': None, - 'security_groups': None, 'subnet': None} - ] - mock_delete.assert_called_once_with(obj, networks_delete) - networks_update = [ - {'network': 'net3', 'port': None, 'fixed_ip': 'ip1', - 'security_groups': ['default'], 
'floating_network': None, - 'vnic_type': None, 'floating_ip': None, 'subnet': None}, - {'network': 'net4', 'port': None, 'fixed_ip': 'ip1', - 'security_groups': ['default'], 'floating_network': None, - 'vnic_type': None, 'floating_ip': None, 'subnet': None}, - {'network': None, 'port': 'port5', 'fixed_ip': None, - 'security_groups': ['default', 'blah'], 'floating_network': None, - 'vnic_type': None, 'floating_ip': None, 'subnet': None} - ] - mock_update.assert_called_once_with(obj, networks_update) - - @mock.patch.object(server.ServerProfile, '_update_password') - @mock.patch.object(server.ServerProfile, '_check_password') - @mock.patch.object(server.ServerProfile, '_update_name') - @mock.patch.object(server.ServerProfile, '_check_server_name') - @mock.patch.object(server.ServerProfile, '_update_flavor') - @mock.patch.object(server.ServerProfile, '_update_metadata') - @mock.patch.object(server.ServerProfile, '_update_image') - @mock.patch.object(server.ServerProfile, '_update_network') - def test_do_update_name_succeeded(self, mock_update_network, - mock_update_image, mock_update_metadata, - mock_update_flavor, mock_check_name, - mock_update_name, mock_check_password, - mock_update_password): - mock_check_name.return_value = True, 'NEW_NAME' - mock_check_password.return_value = True, 'NEW_PASSWORD' - mock_update_image.return_value = False - mock_update_flavor.return_value = False - mock_update_network.return_value = False, False - obj = mock.Mock(physical_id='FAKE_ID') - - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - profile._computeclient.server_get = mock.Mock() - profile._computeclient.server_start = mock.Mock() - new_profile = server.ServerProfile('t', self.spec) - - res = profile.do_update(obj, new_profile) - - self.assertTrue(res) - mock_check_name.assert_called_once_with(obj, new_profile) - mock_update_metadata.assert_called_once_with(obj, new_profile) - mock_update_image.assert_called_once_with( - obj, new_profile, 'NEW_NAME', 'NEW_PASSWORD') - mock_update_name.assert_called_once_with(obj, 'NEW_NAME') - mock_update_password.assert_called_once_with(obj, 'NEW_PASSWORD') - mock_update_flavor.assert_called_once_with(obj, new_profile) - mock_update_network.assert_called_once_with(obj, new_profile) - - @mock.patch.object(server.ServerProfile, '_update_password') - @mock.patch.object(server.ServerProfile, '_check_password') - @mock.patch.object(server.ServerProfile, '_update_name') - @mock.patch.object(server.ServerProfile, '_check_server_name') - @mock.patch.object(server.ServerProfile, '_update_flavor') - @mock.patch.object(server.ServerProfile, '_update_metadata') - @mock.patch.object(server.ServerProfile, '_update_image') - @mock.patch.object(server.ServerProfile, '_update_network') - def test_do_update_name_no_change(self, mock_update_network, - mock_update_image, mock_update_metadata, - mock_update_flavor, mock_check_name, - mock_update_name, mock_check_password, - mock_update_password): - mock_check_name.return_value = False, 'NEW_NAME' - mock_check_password.return_value = False, 'OLD_PASS' - mock_update_network.return_value = False, False - obj = mock.Mock(physical_id='NOVA_ID') - - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - new_profile = server.ServerProfile('t', self.spec) - - res = profile.do_update(obj, new_profile) - - self.assertTrue(res) - mock_check_name.assert_called_once_with(obj, new_profile) - mock_check_password.assert_called_once_with(obj, new_profile) - 
mock_update_image.assert_called_once_with( - obj, new_profile, 'NEW_NAME', 'OLD_PASS') - self.assertEqual(0, mock_update_name.call_count) - self.assertEqual(0, mock_update_password.call_count) - mock_update_flavor.assert_called_once_with(obj, new_profile) - mock_update_network.assert_called_once_with(obj, new_profile) - mock_update_metadata.assert_called_once_with(obj, new_profile) - - @mock.patch.object(server.ServerProfile, '_update_password') - @mock.patch.object(server.ServerProfile, '_check_password') - @mock.patch.object(server.ServerProfile, '_update_name') - @mock.patch.object(server.ServerProfile, '_check_server_name') - @mock.patch.object(server.ServerProfile, '_update_flavor') - @mock.patch.object(server.ServerProfile, '_update_metadata') - @mock.patch.object(server.ServerProfile, '_update_image') - @mock.patch.object(server.ServerProfile, '_update_network') - def test_do_update_name_failed(self, mock_update_network, - mock_update_image, mock_update_metadata, - mock_update_flavor, mock_check_name, - mock_update_name, mock_check_password, - mock_update_password): - mock_check_name.return_value = True, 'NEW_NAME' - mock_check_password.return_value = False, 'OLD_PASS' - mock_update_image.return_value = False - err = exc.EResourceUpdate(type='server', id='NOVA_ID', message='BANG') - mock_update_name.side_effect = err - obj = mock.Mock(physical_id='NOVA_ID') - - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - new_profile = server.ServerProfile('t', self.spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile.do_update, - obj, new_profile) - - self.assertEqual("Failed in updating server 'NOVA_ID': BANG.", - str(ex)) - mock_check_name.assert_called_once_with(obj, new_profile) - mock_check_password.assert_called_once_with(obj, new_profile) - mock_update_image.assert_called_once_with( - obj, new_profile, 'NEW_NAME', 'OLD_PASS') - mock_update_name.assert_called_once_with(obj, 'NEW_NAME') - self.assertEqual(0, mock_update_password.call_count) - self.assertEqual(0, mock_update_flavor.call_count) - self.assertEqual(0, mock_update_metadata.call_count) - - @mock.patch.object(server.ServerProfile, '_update_password') - @mock.patch.object(server.ServerProfile, '_update_name') - @mock.patch.object(server.ServerProfile, '_check_password') - @mock.patch.object(server.ServerProfile, '_check_server_name') - @mock.patch.object(server.ServerProfile, '_update_flavor') - @mock.patch.object(server.ServerProfile, '_update_image') - def test_do_update_image_succeeded(self, mock_update_image, - mock_update_flavor, mock_check_name, - mock_check_password, mock_update_name, - mock_update_password): - mock_check_name.return_value = False, 'OLD_NAME' - mock_check_password.return_value = False, 'OLD_PASS' - mock_update_image.return_value = True - obj = mock.Mock() - obj.physical_id = 'FAKE_ID' - - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - profile._computeclient.server_get = mock.Mock() - profile._computeclient.server_get.return_value = mock.Mock( - status=consts.VS_SHUTOFF) - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['image'] = 'FAKE_IMAGE_NEW' - new_profile = server.ServerProfile('t', new_spec) - - res = profile.do_update(obj, new_profile) - - self.assertTrue(res) - mock_update_image.assert_called_with( - obj, new_profile, 'OLD_NAME', 'OLD_PASS') - self.assertEqual(0, mock_update_name.call_count) - self.assertEqual(0, mock_update_password.call_count) - 
profile._computeclient.server_get.assert_called_once_with( - obj.physical_id) - profile._computeclient.server_start.assert_called_once_with( - obj.physical_id) - - @mock.patch.object(server.ServerProfile, '_update_flavor') - @mock.patch.object(server.ServerProfile, '_update_name') - @mock.patch.object(server.ServerProfile, '_update_metadata') - @mock.patch.object(server.ServerProfile, '_update_image') - @mock.patch.object(server.ServerProfile, '_check_password') - @mock.patch.object(server.ServerProfile, '_check_server_name') - def test_do_update_image_failed(self, mock_check_name, mock_check_password, - mock_update_image, mock_update_meta, - mock_update_name, mock_update_flavor): - mock_check_name.return_value = False, 'OLD_NAME' - mock_check_password.return_value = False, 'OLD_PASS' - # _update_image always throw EResourceUpdate - ex = exc.EResourceUpdate(type='server', id='NOVA_ID', - message='Image Not Found') - mock_update_image.side_effect = ex - obj = mock.Mock(physical_id='NOVA_ID') - - profile = server.ServerProfile('t', self.spec) - profile._computeclient = mock.Mock() - # don't need to invent a new spec - new_spec = copy.deepcopy(self.spec) - new_profile = server.ServerProfile('t', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile.do_update, - obj, new_profile) - - mock_update_image.assert_called_with( - obj, new_profile, 'OLD_NAME', 'OLD_PASS') - self.assertEqual("Failed in updating server 'NOVA_ID': " - "Image Not Found.", str(ex)) - - @mock.patch.object(server.ServerProfile, '_update_flavor') - def test_do_update_update_flavor_succeeded(self, mock_update_flavor): - mock_update_flavor.return_value = True - obj = mock.Mock(physical_id='FAKE_ID') - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image, status=consts.VS_SHUTOFF) - cc = mock.Mock() - cc.server_get.return_value = x_server - gc = mock.Mock() - profile._computeclient = cc - profile._glanceclient = gc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'FAKE_FLAVOR_NEW' - new_profile = server.ServerProfile('t', new_spec) - - res = profile.do_update(obj, new_profile) - self.assertTrue(res) - mock_update_flavor.assert_called_with(obj, new_profile) - gc.image_find.assert_called_with('FAKE_IMAGE', False) - cc.server_start.assert_called_once_with(obj.physical_id) - - @mock.patch.object(server.ServerProfile, '_update_flavor') - def test_do_update_update_flavor_failed(self, mock_update_flavor): - ex = exc.EResourceUpdate(type='server', id='NOVA_ID', - message='Flavor Not Found') - mock_update_flavor.side_effect = ex - obj = mock.Mock(physical_id='NOVA_ID') - profile = server.ServerProfile('t', self.spec) - x_image = {'id': '123'} - x_server = mock.Mock(image=x_image, status=consts.VS_ACTIVE) - cc = mock.Mock() - cc.server_get.return_value = x_server - gc = mock.Mock() - profile._computeclient = cc - profile._glanceclient = gc - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['flavor'] = 'FAKE_FLAVOR_NEW' - new_profile = server.ServerProfile('t', new_spec) - - ex = self.assertRaises(exc.EResourceUpdate, - profile.do_update, - obj, new_profile) - - mock_update_flavor.assert_called_with(obj, new_profile) - self.assertEqual("Failed in updating server 'NOVA_ID': " - "Flavor Not Found.", - str(ex)) - gc.image_find.assert_called_with('FAKE_IMAGE', False) - - @mock.patch.object(server.ServerProfile, '_update_flavor') - @mock.patch.object(server.ServerProfile, '_update_network') - def test_do_update_update_network_succeeded( 
-    @mock.patch.object(server.ServerProfile, '_update_flavor')
-    @mock.patch.object(server.ServerProfile, '_update_network')
-    def test_do_update_update_network_succeeded(
-            self, mock_update_network, mock_update_flavor):
-        mock_update_network.return_value = True, True
-        profile = server.ServerProfile('t', self.spec)
-        x_image = {'id': '123'}
-        x_server = mock.Mock(image=x_image, status=consts.VS_SHUTOFF)
-        cc = mock.Mock()
-        gc = mock.Mock()
-        cc.server_get.return_value = x_server
-        profile._computeclient = cc
-        profile._glanceclient = gc
-
-        obj = mock.Mock(physical_id='NOVA_ID')
-
-        new_spec = copy.deepcopy(self.spec)
-        new_spec['properties']['networks'] = [
-            {'network': 'new_net', 'port': 'new_port', 'fixed_ip': 'new-ip'}
-        ]
-        new_profile = server.ServerProfile('t', new_spec)
-
-        params = {'cluster.stop_timeout_before_update': 134}
-
-        res = profile.do_update(obj, new_profile=new_profile, **params)
-
-        self.assertTrue(res)
-        gc.image_find.assert_called_with('FAKE_IMAGE', False)
-        mock_update_network.assert_called_with(obj, new_profile)
-        cc.server_start.assert_called_once_with(obj.physical_id)
-        self.assertEqual(profile.stop_timeout,
-                         params['cluster.stop_timeout_before_update'])
-
-    @mock.patch.object(server.ServerProfile, '_update_password')
-    @mock.patch.object(server.ServerProfile, '_check_password')
-    @mock.patch.object(server.ServerProfile, '_update_name')
-    @mock.patch.object(server.ServerProfile, '_check_server_name')
-    @mock.patch.object(server.ServerProfile, '_update_flavor')
-    @mock.patch.object(server.ServerProfile, '_update_metadata')
-    @mock.patch.object(server.ServerProfile, '_update_image')
-    @mock.patch.object(server.ServerProfile, '_update_network')
-    def test_do_update_update_network_failed(
-            self, mock_update_network, mock_update_image, mock_update_metadata,
-            mock_update_flavor, mock_check_name, mock_update_name,
-            mock_check_password, mock_update_password):
-
-        mock_check_name.return_value = True, 'NEW_NAME'
-        mock_check_password.return_value = True, 'NEW_PASSWORD'
-        mock_update_image.return_value = True
-        err = exc.EResourceUpdate(type='server', id='NOVA_ID', message='BOOM')
-        mock_update_network.side_effect = err
-
-        profile = server.ServerProfile('t', self.spec)
-        profile._computeclient = mock.Mock()
-        new_network = {
-            'port': 'FAKE_PORT_NEW',
-            'fixed_ip': 'FAKE_IP_NEW',
-            'network': 'FAKE_NET_NEW',
-        }
-        new_spec = copy.deepcopy(self.spec)
-        new_spec['properties']['networks'] = [new_network]
-        new_profile = server.ServerProfile('t', new_spec)
-        obj = mock.Mock(physical_id='NOVA_ID')
-
-        ex = self.assertRaises(exc.EResourceUpdate,
-                               profile.do_update,
-                               obj, new_profile)
-
-        self.assertEqual("Failed in updating server 'NOVA_ID': BOOM.",
-                         str(ex))
-        mock_check_name.assert_called_once_with(obj, new_profile)
-        mock_check_password.assert_called_once_with(obj, new_profile)
-        mock_update_image.assert_called_once_with(
-            obj, new_profile, 'NEW_NAME', 'NEW_PASSWORD')
-        self.assertEqual(0, mock_update_name.call_count)
-        self.assertEqual(0, mock_update_password.call_count)
-        mock_update_flavor.assert_called_once_with(obj, new_profile)
-        mock_update_network.assert_called_with(obj, new_profile)
-        self.assertEqual(0, mock_update_metadata.call_count)
-
-    def test_do_update_without_profile(self):
-        profile = server.ServerProfile('t', self.spec)
-        obj = mock.Mock()
-        obj.physical_id = 'FAKE_ID'
-        new_profile = None
-        res = profile.do_update(obj, new_profile)
-        self.assertFalse(res)
-
-    def test_do_update_no_physical_id(self):
-        profile = server.ServerProfile('t', self.spec)
-        profile._computeclient = mock.Mock()
-        node_obj = mock.Mock(physical_id=None)
-        new_profile = mock.Mock()
-
-        # Test path where server doesn't exist
-        res = profile.do_update(node_obj, new_profile)
-
-        self.assertFalse(res)
-
-    def test_do_update_invalid_stop_timeout(self):
-        profile = server.ServerProfile('t', self.spec)
-        profile._computeclient = mock.Mock()
-        node_obj = mock.Mock(physical_id='NOVA_ID')
-        new_spec = copy.deepcopy(self.spec)
-        new_profile = server.ServerProfile('t', new_spec)
-
-        params = {'cluster.stop_timeout_before_update': '123'}
-        ex = self.assertRaises(exc.EResourceUpdate,
-                               profile.do_update,
-                               node_obj, new_profile, **params)
-
-        self.assertEqual("Failed in updating server 'NOVA_ID': "
-                         "cluster.stop_timeout_before_update value must be of "
-                         "type int.",
-                         str(ex))
diff --git a/senlin/tests/unit/profiles/test_nova_server_validate.py b/senlin/tests/unit/profiles/test_nova_server_validate.py
deleted file mode 100644
index e274092a8..000000000
--- a/senlin/tests/unit/profiles/test_nova_server_validate.py
+++ /dev/null
@@ -1,966 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from unittest import mock
-
-from senlin.common import exception as exc
-from senlin.profiles.os.nova import server
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-spec = {
-    'type': 'os.nova.server',
-    'version': '1.0',
-    'properties': {
-        'context': {},
-        'auto_disk_config': True,
-        'availability_zone': 'FAKE_AZ',
-        'block_device_mapping': [{
-            'device_name': 'FAKE_NAME',
-            'volume_size': 1000,
-        }],
-        'flavor': 'FLAV',
-        'image': 'FAKE_IMAGE',
-        'key_name': 'FAKE_KEYNAME',
-        "metadata": {"meta var": "meta val"},
-        'name': 'FAKE_SERVER_NAME',
-        'networks': [{
-            'floating_ip': 'FAKE_FLOATING_IP',
-            'floating_network': 'FAKE_FLOATING_NET',
-            'security_groups': ['FAKE_SECURITY_GROUP'],
-            'port': 'FAKE_PORT',
-            'vnic_type': 'direct',
-            'fixed_ip': 'FAKE_IP',
-            'network': 'FAKE_NET',
-            'subnet': 'FAKE_SUBNET',
-        }],
-        'scheduler_hints': {
-            'same_host': 'HOST_ID',
-        },
-    }
-}
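The validation classes that follow all drive a single `test_validation` method through many named cases via a class-level `scenarios` list. A minimal sketch of that mechanism, under the assumption (common for OpenStack test bases) that the base test case mixes in the `testscenarios` library, which multiplies each test method once per `(name, attrs)` pair and copies the attrs onto the instance:

```python
import testscenarios
import testtools


class TestSquare(testscenarios.WithScenarios, testtools.TestCase):

    # each entry becomes a separately named run of every test method
    scenarios = [
        ('positive', dict(value=2, expected=4)),
        ('zero', dict(value=0, expected=0)),
    ]

    def test_square(self):
        # self.value / self.expected are injected from the scenario dict
        self.assertEqual(self.expected, self.value * self.value)
```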
-
-
-class TestAvailabilityZoneValidation(base.SenlinTestCase):
-
-    scenarios = [
-        ('validate:success', dict(
-            reason=None,
-            success=True,
-            validate_result=[['FAKE_AZ']],
-            result='FAKE_AZ',
-            exception=None,
-            message='')),
-        ('validate:driver_failure', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(message='BANG.'),
-            result='FAKE_AZ',
-            exception=exc.InternalError,
-            message='BANG.')),
-        ('validate:not_found', dict(
-            reason=None,
-            success=False,
-            validate_result=[[]],
-            result='FAKE_AZ',
-            exception=exc.InvalidSpec,
-            message=("The specified availability_zone 'FAKE_AZ' could "
-                     "not be found"))),
-        ('create:success', dict(
-            reason='create',
-            success=True,
-            validate_result=[['FAKE_AZ']],
-            result='FAKE_AZ',
-            exception=None,
-            message='')),
-        ('create:driver_failure', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(message='BANG'),
-            result='FAKE_AZ',
-            exception=exc.EResourceCreation,
-            message='Failed in creating server: BANG.')),
-        ('create:not_found', dict(
-            reason='create',
-            success=False,
-            validate_result=[[]],
-            result='FAKE_AZ',
-            exception=exc.EResourceCreation,
-            message=("Failed in creating server: The specified "
-                     "availability_zone 'FAKE_AZ' could not be found.")))
-    ]
-
-    def setUp(self):
-        super(TestAvailabilityZoneValidation, self).setUp()
-
-        self.cc = mock.Mock()
-        prof = server.ServerProfile('t', spec)
-        prof._computeclient = self.cc
-        self.profile = prof
-
-    def test_validation(self):
-        self.cc.validate_azs.side_effect = self.validate_result
-        node = mock.Mock(id='NODE_ID')
-
-        if self.success:
-            res = self.profile._validate_az(node, 'FAKE_AZ', self.reason)
-            self.assertEqual(self.result, res)
-        else:
-            ex = self.assertRaises(self.exception,
-                                   self.profile._validate_az,
-                                   node, 'FAKE_AZ', self.reason)
-            self.assertEqual(self.message, str(ex))
-
-        self.cc.validate_azs.assert_called_once_with(['FAKE_AZ'])
-
-
-class TestFlavorValidation(base.SenlinTestCase):
-
-    scenarios = [
-        ('validate:success', dict(
-            reason=None,
-            success=True,
-            validate_result=[mock.Mock(id='FID', is_disabled=False)],
-            result='FID',
-            exception=None,
-            message='')),
-        ('validate:driver_failure', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(message='BANG.'),
-            result='FID',
-            exception=exc.InternalError,
-            message='BANG.')),
-        ('validate:not_found', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG.'),
-            result='FID',
-            exception=exc.InvalidSpec,
-            message="The specified flavor 'FLAVOR' could not be found.")),
-        ('validate:disabled', dict(
-            reason=None,
-            success=False,
-            validate_result=[mock.Mock(id='FID', is_disabled=True)],
-            result='FID',
-            exception=exc.InvalidSpec,
-            message="The specified flavor 'FLAVOR' is disabled")),
-        ('create:success', dict(
-            reason='create',
-            success=True,
-            validate_result=[mock.Mock(id='FID', is_disabled=False)],
-            result='FID',
-            exception=None,
-            message='')),
-        ('create:driver_failure', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(message='BANG'),
-            result='FID',
-            exception=exc.EResourceCreation,
-            message='Failed in creating server: BANG.')),
-        ('create:not_found', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG'),
-            result='FID',
-            exception=exc.EResourceCreation,
-            message="Failed in creating server: BANG.")),
-        ('create:disabled', dict(
-            reason='create',
-            success=False,
-            validate_result=[mock.Mock(id='FID', is_disabled=True)],
-            result='FID',
-            exception=exc.EResourceCreation,
-            message=("Failed in creating server: The specified flavor "
-                     "'FLAVOR' is disabled."))),
-        ('update:success', dict(
-            reason='update',
-            success=True,
-            validate_result=[mock.Mock(id='FID', is_disabled=False)],
-            result='FID',
-            exception=None,
-            message='')),
-        ('update:driver_failure', dict(
-            reason='update',
-            success=False,
-            validate_result=exc.InternalError(message='BANG'),
-            result='FID',
-            exception=exc.EResourceUpdate,
-            message="Failed in updating server 'NOVA_ID': BANG.")),
-        ('update:not_found', dict(
-            reason='update',
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG'),
-            result='FID',
-            exception=exc.EResourceUpdate,
-            message="Failed in updating server 'NOVA_ID': BANG.")),
-        ('update:disabled', dict(
-            reason='update',
-            success=False,
-            validate_result=[mock.Mock(id='FID', is_disabled=True)],
-            result='FID',
-            exception=exc.EResourceUpdate,
-            message=("Failed in updating server 'NOVA_ID': The specified "
-                     "flavor 'FLAVOR' is disabled.")))
-    ]
-
-    def setUp(self):
-        super(TestFlavorValidation, self).setUp()
-
-        self.cc = mock.Mock()
-        self.profile = server.ServerProfile('t', spec)
-        self.profile._computeclient = self.cc
-
-    def test_validation(self):
-        self.cc.flavor_find.side_effect = self.validate_result
-        node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
-        flavor = 'FLAVOR'
-
-        if self.success:
-            res = self.profile._validate_flavor(node, flavor, self.reason)
-            self.assertIsNotNone(res)
-            self.assertEqual(self.result, res.id)
-        else:
-            ex = self.assertRaises(self.exception,
-                                   self.profile._validate_flavor,
-                                   node, flavor, self.reason)
-            self.assertEqual(self.message, str(ex))
-
-        self.cc.flavor_find.assert_called_once_with(flavor, False)
-
-
-class TestImageValidation(base.SenlinTestCase):
-
-    scenarios = [
-        ('validate:success', dict(
-            reason=None,
-            success=True,
-            validate_result=[mock.Mock(id='IMAGE_ID')],
-            result='IMAGE_ID',
-            exception=None,
-            message='')),
-        ('validate:driver_failure', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(message='BANG.'),
-            result='FID',
-            exception=exc.InternalError,
-            message='BANG.')),
-        ('validate:not_found', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG.'),
-            result='FID',
-            exception=exc.InvalidSpec,
-            message="The specified image 'IMAGE' could not be found.")),
-        ('create:success', dict(
-            reason='create',
-            success=True,
-            validate_result=[mock.Mock(id='IMAGE_ID')],
-            result='IMAGE_ID',
-            exception=None,
-            message='')),
-        ('create:driver_failure', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(message='BANG'),
-            result='FID',
-            exception=exc.EResourceCreation,
-            message='Failed in creating server: BANG.')),
-        ('create:not_found', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG'),
-            result='FID',
-            exception=exc.EResourceCreation,
-            message="Failed in creating server: BANG.")),
-        ('update:success', dict(
-            reason='update',
-            success=True,
-            validate_result=[mock.Mock(id='IMAGE_ID')],
-            result='IMAGE_ID',
-            exception=None,
-            message='')),
-        ('update:driver_failure', dict(
-            reason='update',
-            success=False,
-            validate_result=exc.InternalError(message='BANG'),
-            result='FID',
-            exception=exc.EResourceUpdate,
-            message="Failed in updating server 'NOVA_ID': BANG.")),
-        ('update:not_found', dict(
-            reason='update',
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG'),
-            result='FID',
-            exception=exc.EResourceUpdate,
-            message="Failed in updating server 'NOVA_ID': BANG.")),
-    ]
-
-    def setUp(self):
-        super(TestImageValidation, self).setUp()
-
-        self.cc = mock.Mock()
-        self.gc = mock.Mock()
-        self.profile = server.ServerProfile('t', spec)
-        self.profile._computeclient = self.cc
-        self.profile._glanceclient = self.gc
-
-    def test_validation(self):
-        self.gc.image_find.side_effect = self.validate_result
-        node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
-        image = 'IMAGE'
-
-        if self.success:
-            res = self.profile._validate_image(node, image, self.reason)
-            self.assertIsNotNone(res)
-            self.assertEqual(self.result, res.id)
-        else:
-            ex = self.assertRaises(self.exception,
-                                   self.profile._validate_image,
-                                   node, image, self.reason)
-            self.assertEqual(self.message, str(ex))
-
-        self.gc.image_find.assert_called_once_with(image, False)
-
-
-class TestVolumeValidation(base.SenlinTestCase):
-
-    scenarios = [
-        ('validate:success', dict(
-            reason=None,
-            success=True,
-            validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
-            result='VOLUME_ID',
-            exception=None,
-            message='')),
-        ('validate:failure', dict(
-            reason=None,
-            success=False,
-            validate_result=[mock.Mock(id='VOLUME_ID', status='in-use')],
-            result='VOLUME_ID',
-            exception=exc.InvalidSpec,
-            message="The volume VOLUME should be in 'available' "
-                    "status but is in 'in-use' status.")),
-        ('validate:driver_failure', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(message='BANG.'),
-            result='FID',
-            exception=exc.InternalError,
-            message='BANG.')),
-        ('validate:not_found', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG.'),
-            result='FID',
-            exception=exc.InvalidSpec,
-            message="The specified volume 'VOLUME' could not be found.")),
-        ('create:success', dict(
-            reason='create',
-            success=True,
-            validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
-            result='VOLUME_ID',
-            exception=None,
-            message='')),
-        ('create:driver_failure', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(message='BANG'),
-            result='FID',
-            exception=exc.EResourceCreation,
-            message='Failed in creating server: BANG.')),
-        ('create:not_found', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG'),
-            result='FID',
-            exception=exc.EResourceCreation,
-            message="Failed in creating server: BANG.")),
-    ]
-
-    def setUp(self):
-        super(TestVolumeValidation, self).setUp()
-        bdm_v2 = [
-            {
-                'volume_size': 1,
-                'uuid': '6ce0be68',
-                'source_type': 'volume',
-                'destination_type': 'volume',
-                'boot_index': 0,
-            },
-        ]
-
-        volume_spec = {
-            'type': 'os.nova.server',
-            'version': '1.0',
-            'properties': {
-                'flavor': 'FLAV',
-                'name': 'FAKE_SERVER_NAME',
-                'security_groups': ['HIGH_SECURITY_GROUP'],
-                'block_device_mapping_v2': bdm_v2,
-            }
-        }
-
-        self.vc = mock.Mock()
-        self.profile = server.ServerProfile('t', volume_spec)
-        self.profile._block_storageclient = self.vc
-
-    def test_validation(self):
-        self.vc.volume_get.side_effect = self.validate_result
-        node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
-        volume = 'VOLUME'
-
-        if self.success:
-            res = self.profile._validate_volume(node, volume, self.reason)
-            self.assertIsNotNone(res)
-            self.assertEqual(self.result, res.id)
-        else:
-            ex = self.assertRaises(self.exception,
-                                   self.profile._validate_volume,
-                                   node, volume, self.reason)
-            self.assertEqual(self.message, str(ex))
-
-        self.vc.volume_get.assert_called_once_with(volume)
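These scenarios rely on a double-duty convention of `unittest.mock`'s `side_effect`: an iterable makes the mock return successive items, while an exception instance makes the mock raise. That is why `validate_result` is either a one-element list like `[mock.Mock(...)]` or a bare `exc.InternalError(...)`. A small sketch of both behaviors:

```python
from unittest import mock

client = mock.Mock()

# an iterable side_effect yields successive return values
client.volume_get.side_effect = [{'id': 'VOLUME_ID'}]
assert client.volume_get('VOLUME') == {'id': 'VOLUME_ID'}

# an exception instance as side_effect is raised on call
client.volume_get.side_effect = RuntimeError('BANG')
try:
    client.volume_get('VOLUME')
except RuntimeError as err:
    assert str(err) == 'BANG'
```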
-
-
-class TestKeypairValidation(base.SenlinTestCase):
-
-    scenarios = [
-        ('validate:success', dict(
-            reason=None,
-            success=True,
-            validate_result=[mock.Mock(id='KEY_ID')],
-            result='KEY_ID',
-            exception=None,
-            message='')),
-        ('validate:driver_failure', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(message='BANG.'),
-            result='FID',
-            exception=exc.InternalError,
-            message='BANG.')),
-        ('validate:not_found', dict(
-            reason=None,
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG.'),
-            result='FID',
-            exception=exc.InvalidSpec,
-            message="The specified key_name 'KEY' could not be found.")),
-        ('create:success', dict(
-            reason='create',
-            success=True,
-            validate_result=[mock.Mock(id='IMAGE_ID')],
-            result='IMAGE_ID',
-            exception=None,
-            message='')),
-        ('create:driver_failure', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(message='BANG'),
-            result='FID',
-            exception=exc.EResourceCreation,
-            message='Failed in creating server: BANG.')),
-        ('create:not_found', dict(
-            reason='create',
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG'),
-            result='FID',
-            exception=exc.EResourceCreation,
-            message="Failed in creating server: BANG.")),
-        ('update:success', dict(
-            reason='update',
-            success=True,
-            validate_result=[mock.Mock(id='KEY_ID')],
-            result='KEY_ID',
-            exception=None,
-            message='')),
-        ('update:driver_failure', dict(
-            reason='update',
-            success=False,
-            validate_result=exc.InternalError(message='BANG'),
-            result='FID',
-            exception=exc.EResourceUpdate,
-            message="Failed in updating server 'NOVA_ID': BANG.")),
-        ('update:not_found', dict(
-            reason='update',
-            success=False,
-            validate_result=exc.InternalError(code=404, message='BANG'),
-            result='FID',
-            exception=exc.EResourceUpdate,
-            message="Failed in updating server 'NOVA_ID': BANG.")),
-    ]
-
-    def setUp(self):
-        super(TestKeypairValidation, self).setUp()
-
-        self.cc = mock.Mock()
-        self.profile = server.ServerProfile('t', spec)
-        self.profile._computeclient = self.cc
-
-    def test_validation(self):
-        self.cc.keypair_find.side_effect = self.validate_result
-        node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
-        key = 'KEY'
-
-        if self.success:
-            res = self.profile._validate_keypair(node, key, self.reason)
-            self.assertIsNotNone(res)
-            self.assertEqual(self.result, res.id)
-        else:
-            ex = self.assertRaises(self.exception,
-                                   self.profile._validate_keypair,
-                                   node, key, self.reason)
-            self.assertEqual(self.message, str(ex))
-
-        self.cc.keypair_find.assert_called_once_with(key, False)
-
-
-class TestNetworkValidation(base.SenlinTestCase):
-
-    scenarios = [
-        ('validate:net-n:port-y:fixed_ip-n:sgroups-n', dict(
-            reason=None,
-            success=True,
-            inputs={'port': 'PORT'},
-            net_result=[],
-            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
-            sg_result=[],
-            floating_result=[],
-            result={'port': 'PORT_ID'},
-            exception=None,
-            message='')),
-        ('validate:net-y:port-n:fixed_ip-n:sgroups-y', dict(
-            reason=None,
-            success=True,
-            inputs={'network': 'NET', 'security_groups': ['default']},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[mock.Mock(id='SG_ID')],
-            floating_result=[],
-            result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
-            exception=None,
-            message='')),
-        ('validate:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
-            reason=None,
-            success=True,
-            inputs={'network': 'NET', 'floating_network': 'NET'},
-            net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
-            exception=None,
-            message='')),
-        ('validate:net-y:port-n:fixed_ip-n:floating_net-y:floating_ip-y', dict(
-            reason=None,
-            success=True,
-            inputs={'network': 'NET', 'floating_network': 'NET',
-                    'floating_ip': 'FLOATINGIP'},
-            net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
-            result={'network': 'NET_ID', 'floating_network': 'NET_ID',
-                    'floating_ip_id': 'FLOATINGIP_ID',
-                    'floating_ip': 'FLOATINGIP'},
-            exception=None,
-            message='')),
-        ('validate:net-y:port-n:fixed_ip-y:sgroups-n', dict(
-            reason=None,
-            success=True,
-            inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
-            exception=None,
-            message='')),
-        ('validate:net-f:port-y:fixed_ip-n:sgroups-n', dict(
-            reason=None,
-            success=False,
-            inputs={'network': 'NET', 'port': 'PORT'},
-            net_result=[exc.InternalError(message='NET Failure')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.InvalidSpec,
-            message='NET Failure')),
-        ('validate:net-n:port-f:fixed_ip-n', dict(
-            reason=None,
-            success=False,
-            inputs={'port': 'PORT'},
-            net_result=[],
-            port_result=[exc.InternalError(message='PORT Failure')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.InvalidSpec,
-            message='PORT Failure')),
-        ('validate:net-n:port-active:fixed_ip-n', dict(
-            reason=None,
-            success=False,
-            inputs={'port': 'PORT'},
-            net_result=[],
-            port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.InvalidSpec,
-            message='The status of the port PORT must be DOWN')),
-        ('validate:net-n:port-y:fixed_ip-n:floating_net-n:floating_ip-y', dict(
-            reason=None,
-            success=False,
-            inputs={'port': 'PORT', 'floating_ip': 'FLOATINGIP'},
-            net_result=[],
-            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
-            sg_result=[],
-            floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
-            result={},
-            exception=exc.InvalidSpec,
-            message='Must specify a network to create floating IP')),
-        ('validate:net-n:port-y:fixed_ip-n:floating_ip-active', dict(
-            reason=None,
-            success=False,
-            inputs={'port': 'PORT', 'floating_network': 'NET',
-                    'floating_ip': 'FLOATINGIP'},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
-            sg_result=[],
-            floating_result=[mock.Mock(id='FLOATINGIP_ID', status='ACTIVE')],
-            result={},
-            exception=exc.InvalidSpec,
-            message='the floating IP FLOATINGIP has been used.')),
-        ('validate:net-n:port-n:fixed_ip-n', dict(
-            reason=None,
-            success=False,
-            inputs={'fixed_ip': 'FIXED_IP'},
-            net_result=[],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.InvalidSpec,
-            message="One of 'port' and 'network' must be provided")),
-        ('validate:net-n:port-y:fixed_ip-y', dict(
-            reason=None,
-            success=False,
-            inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
-            net_result=[],
-            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.InvalidSpec,
-            message=("The 'port' property and the 'fixed_ip' property cannot "
-                     "be specified at the same time"))),
-        ('create:net-y:port-y:fixed_ip-n', dict(
-            reason='create',
-            success=True,
-            inputs={'network': 'NET', 'port': 'PORT'},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
-            sg_result=[],
-            floating_result=[],
-            result={'network': 'NET_ID', 'port': 'PORT_ID'},
-            exception=None,
-            message='')),
-        ('create:net-y:port-n:fixed_ip-y', dict(
-            reason='create',
-            success=True,
-            inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
-            exception=None,
-            message='')),
-        ('create:net-y:port-n:fixed_ip-n:sgroups-y', dict(
-            reason='create',
-            success=True,
-            inputs={'network': 'NET', 'security_groups': ['default']},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[mock.Mock(id='SG_ID')],
-            floating_result=[],
-            result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
-            exception=None,
-            message='')),
-        ('create:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
-            reason=None,
-            success=True,
-            inputs={'network': 'NET', 'floating_network': 'NET'},
-            net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
-            exception=None,
-            message='')),
-        ('create:net-f:port-y:fixed_ip-n', dict(
-            reason='create',
-            success=False,
-            inputs={'network': 'NET', 'port': 'PORT'},
-            net_result=[exc.InternalError(message='NET Failure')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceCreation,
-            message='Failed in creating server: NET Failure.')),
-        ('create:net-n:port-f:fixed_ip-n', dict(
-            reason='create',
-            success=False,
-            inputs={'port': 'PORT'},
-            net_result=[],
-            port_result=[exc.InternalError(message='PORT Failure')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceCreation,
-            message='Failed in creating server: PORT Failure.')),
-        ('create:net-n:port-active:fixed_ip-n', dict(
-            reason='create',
-            success=False,
-            inputs={'port': 'PORT'},
-            net_result=[],
-            port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceCreation,
-            message=('Failed in creating server: The status of the port PORT '
-                     'must be DOWN.'))),
-        ('create:net-n:port-n:fixed_ip-n', dict(
-            reason='create',
-            success=False,
-            inputs={'fixed_ip': 'FIXED_IP'},
-            net_result=[],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceCreation,
-            message=("Failed in creating server: One of 'port' "
-                     "and 'network' must be provided."))),
-        ('create:net-n:port-y:fixed_ip-y', dict(
-            reason='create',
-            success=False,
-            inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
-            net_result=[],
-            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceCreation,
-            message=("Failed in creating server: The 'port' property and the "
-                     "'fixed_ip' property cannot be specified at the same "
-                     "time."))),
-        ('update:net-y:port-y:fixed_ip-n', dict(
-            reason='update',
-            success=True,
-            inputs={'network': 'NET', 'port': 'PORT'},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
-            sg_result=[],
-            floating_result=[],
-            result={'network': 'NET_ID', 'port': 'PORT_ID'},
-            exception=None,
-            message='')),
-        ('update:net-y:port-n:fixed_ip-y', dict(
-            reason='update',
-            success=True,
-            inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={'network': 'NET_ID',
-                    'fixed_ip': 'FIXED_IP'},
-            exception=None,
-            message='')),
-        ('update:net-y:port-n:fixed_ip-n:sgroups-y', dict(
-            reason='create',
-            success=True,
-            inputs={'network': 'NET', 'security_groups': ['default']},
-            net_result=[mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[mock.Mock(id='SG_ID')],
-            floating_result=[],
-            result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
-            exception=None,
-            message='')),
-        ('update:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
-            reason=None,
-            success=True,
-            inputs={'network': 'NET', 'floating_network': 'NET'},
-            net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
-            exception=None,
-            message='')),
-        ('update:net-f:port-y:fixed_ip-n', dict(
-            reason='update',
-            success=False,
-            inputs={'network': 'NET', 'port': 'PORT'},
-            net_result=[exc.InternalError(message='NET Failure')],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceUpdate,
-            message="Failed in updating server 'NOVA_ID': NET Failure.")),
-        ('update:net-n:port-f:fixed_ip-n', dict(
-            reason='update',
-            success=False,
-            inputs={'port': 'PORT'},
-            net_result=[],
-            port_result=[exc.InternalError(message='PORT Failure')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceUpdate,
-            message="Failed in updating server 'NOVA_ID': PORT Failure.")),
-        ('update:net-n:port-active:fixed_ip-n', dict(
-            reason='update',
-            success=False,
-            inputs={'port': 'PORT'},
-            net_result=[],
-            port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceUpdate,
-            message=("Failed in updating server 'NOVA_ID': The status of the "
-                     "port PORT must be DOWN."))),
-        ('update:net-n:port-n:fixed_ip-n', dict(
-            reason='update',
-            success=False,
-            inputs={'fixed_ip': 'FIXED_IP'},
-            net_result=[],
-            port_result=[],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceUpdate,
-            message=("Failed in updating server 'NOVA_ID': One of 'port' "
-                     "and 'network' must be provided."))),
-        ('update:net-n:port-y:fixed_ip-y', dict(
-            reason='update',
-            success=False,
-            inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
-            net_result=[],
-            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
-            sg_result=[],
-            floating_result=[],
-            result={},
-            exception=exc.EResourceUpdate,
-            message=("Failed in updating server 'NOVA_ID': The 'port' "
-                     "property and the 'fixed_ip' property cannot be "
-                     "specified at the same time."))),
-    ]
-
-    def setUp(self):
-        super(TestNetworkValidation, self).setUp()
-
-        self.nc = mock.Mock()
-        self.profile = server.ServerProfile('t', spec)
-        self.profile._networkclient = self.nc
-
-    def test_validation(self):
-        self.nc.network_get.side_effect = self.net_result
-        self.nc.port_find.side_effect = self.port_result
-        self.nc.security_group_find.side_effect = self.sg_result
-        self.nc.floatingip_find.side_effect = self.floating_result
-        obj = mock.Mock(physical_id='NOVA_ID')
-
-        if self.success:
-            res = self.profile._validate_network(obj, self.inputs, self.reason)
-            self.assertEqual(self.result, res)
-        else:
-            ex = self.assertRaises(self.exception,
-                                   self.profile._validate_network,
-                                   obj, self.inputs, self.reason)
-            self.assertEqual(self.message, str(ex))
-
-        if self.net_result:
-            self.nc.network_get.assert_called_with('NET')
-        if self.port_result:
-            self.nc.port_find.assert_called_once_with('PORT')
-        if self.sg_result:
-            self.nc.security_group_find.assert_called_once_with(
-                'default', project_id=None)
-        if self.floating_result:
-            self.nc.floatingip_find.assert_called_once_with('FLOATINGIP')
-
-    def test_validation_with_project_scope(self):
-        self.nc.network_get.side_effect = self.net_result
-        self.nc.port_find.side_effect = self.port_result
-        self.nc.security_group_find.side_effect = self.sg_result
-        self.nc.floatingip_find.side_effect = self.floating_result
-        obj = mock.Mock(physical_id='NOVA_ID')
-
-        self.profile.project_scope = True
-        self.profile.project = 'FAKE_PROJECT_ID'
-
-        if self.success:
-            res = self.profile._validate_network(obj, self.inputs, self.reason)
-            self.assertEqual(self.result, res)
-        else:
-            ex = self.assertRaises(self.exception,
-                                   self.profile._validate_network,
-                                   obj, self.inputs, self.reason)
-            self.assertEqual(self.message, str(ex))
-
-        if self.net_result:
-            self.nc.network_get.assert_called_with('NET')
-        if self.port_result:
-            self.nc.port_find.assert_called_once_with('PORT')
-        if self.sg_result:
-            self.nc.security_group_find.assert_called_once_with(
-                'default', project_id='FAKE_PROJECT_ID')
-        if self.floating_result:
-            self.nc.floatingip_find.assert_called_once_with('FLOATINGIP')
-
-
-class TestNovaServerValidate(base.SenlinTestCase):
-
-    def setUp(self):
-        super(TestNovaServerValidate, self).setUp()
-
-        self.context = utils.dummy_context()
-
-    def test_do_validate_all_passed(self):
-        profile = server.ServerProfile('t', spec)
-        mock_az = self.patchobject(profile, '_validate_az')
-        mock_flavor = self.patchobject(profile, '_validate_flavor')
-        mock_image = self.patchobject(profile, '_validate_image')
-        mock_keypair = self.patchobject(profile, '_validate_keypair')
-        mock_network = self.patchobject(profile, '_validate_network')
-        obj = mock.Mock()
-
-        res = profile.do_validate(obj)
-
-        properties = spec['properties']
-        self.assertTrue(res)
-        mock_az.assert_called_once_with(obj, properties['availability_zone'])
-        mock_flavor.assert_called_once_with(obj, properties['flavor'])
-        mock_image.assert_called_once_with(obj, properties['image'])
-        mock_keypair.assert_called_once_with(obj, properties['key_name'])
-        mock_network.assert_called_once_with(obj, properties['networks'][0])
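The assertions in `test_do_validate_all_passed` pin down how `do_validate` composes the per-resource helpers. A plausible shape of that method, reconstructed from the assertions only (hypothetical, not the removed implementation verbatim):

```python
# Hypothetical reconstruction of ServerProfile.do_validate, inferred from
# the mock assertions above: each spec property is checked by its own
# _validate_* helper, with reason left as the default (spec validation).
def do_validate(self, obj):
    """Validate the profile spec against the backing cloud."""
    props = self.properties
    self._validate_az(obj, props['availability_zone'])
    self._validate_flavor(obj, props['flavor'])
    self._validate_image(obj, props['image'])
    self._validate_keypair(obj, props['key_name'])
    for net in props['networks']:
        self._validate_network(obj, net)
    return True
```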
diff --git a/senlin/tests/unit/profiles/test_profile_base.py b/senlin/tests/unit/profiles/test_profile_base.py
deleted file mode 100644
index d344b8de5..000000000
--- a/senlin/tests/unit/profiles/test_profile_base.py
+++ /dev/null
@@ -1,1007 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-from unittest import mock
-
-from oslo_context import context as oslo_ctx
-
-from senlin.common import consts
-from senlin.common import context as senlin_ctx
-from senlin.common import exception
-from senlin.common import schema
-from senlin.common import utils as common_utils
-from senlin.engine import environment
-from senlin.engine import parser
-from senlin.objects import credential as co
-from senlin.objects import profile as po
-from senlin.profiles import base as pb
-from senlin.profiles.os.nova import server as nova_server
-from senlin.tests.unit.common import base
-from senlin.tests.unit.common import utils
-
-
-sample_profile = """
-  type: os.dummy
-  version: 1.0
-  properties:
-    key1: value1
-    key2: 2
-"""
-
-
-class DummyProfile(pb.Profile):
-
-    VERSION = '1.0'
-    CONTEXT = 'context'
-
-    properties_schema = {
-        CONTEXT: schema.Map(
-            'context data'
-        ),
-        'key1': schema.String(
-            'first key',
-            default='value1',
-            updatable=True,
-        ),
-        'key2': schema.Integer(
-            'second key',
-            required=True,
-            updatable=True,
-        ),
-        'key3': schema.String(
-            'third key',
-        ),
-    }
-    OPERATIONS = {
-        'op1': schema.Operation(
-            'Operation 1',
-            schema={
-                'param1': schema.StringParam(
-                    'description of param1',
-                )
-            }
-        )
-    }
-
-    def __init__(self, name, spec, **kwargs):
-        super(DummyProfile, self).__init__(name, spec, **kwargs)
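The fixture that follows registers `DummyProfile` under both `os.dummy-1.0` and `os.dummy-1.1`, and the spec's `type`/`version` pair selects the class. A minimal sketch of that "name-version" resolution convention (an illustrative registry only; Senlin's real one lives in `senlin/engine/environment.py`):

```python
# Illustrative registry keyed by '<type>-<version>', mirroring how the test
# fixture registers 'os.dummy-1.0' and 'os.dummy-1.1'.
registry = {}


def register_profile(key, cls):
    registry[key] = cls


def resolve(spec):
    # note: a float version like 1.1 must be stringified first, which is
    # what test_init_version_as_float exercises below
    key = '%s-%s' % (spec['type'], spec['version'])
    return registry[key]


class OSDummy:
    pass


register_profile('os.dummy-1.0', OSDummy)
assert resolve({'type': 'os.dummy', 'version': '1.0'}) is OSDummy
```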
-
-
-class TestProfileBase(base.SenlinTestCase):
-
-    def setUp(self):
-        super(TestProfileBase, self).setUp()
-        self.ctx = utils.dummy_context(project='profile_test_project')
-        g_env = environment.global_env()
-        g_env.register_profile('os.dummy-1.0', DummyProfile)
-        g_env.register_profile('os.dummy-1.1', DummyProfile)
-        self.spec = parser.simple_parse(sample_profile)
-
-    def _create_profile(self, name, pid=None, context=None):
-        profile = pb.Profile(name, self.spec,
-                             user=self.ctx.user_id,
-                             project=self.ctx.project_id,
-                             domain=self.ctx.domain_id,
-                             context=context)
-        if pid:
-            profile.id = pid
-            profile.context = context
-
-        return profile
-
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_init(self, mock_creds):
-        mock_creds.return_value = {'foo': 'bar'}
-        profile = self._create_profile('test-profile')
-
-        self.assertIsNone(profile.id)
-        self.assertEqual('test-profile', profile.name)
-        self.assertEqual(self.spec, profile.spec)
-        self.assertEqual('os.dummy', profile.type_name)
-        self.assertEqual('1.0', profile.version)
-        self.assertEqual('os.dummy-1.0', profile.type)
-        self.assertEqual(self.ctx.user_id, profile.user)
-        self.assertEqual(self.ctx.project_id, profile.project)
-        self.assertEqual(self.ctx.domain_id, profile.domain)
-        self.assertEqual({}, profile.metadata)
-        self.assertIsNone(profile.created_at)
-        self.assertIsNone(profile.updated_at)
-
-        spec_data = profile.spec_data
-        self.assertEqual('os.dummy', spec_data['type'])
-        self.assertEqual('1.0', spec_data['version'])
-        self.assertEqual('value1', spec_data['properties']['key1'])
-        self.assertEqual(2, spec_data['properties']['key2'])
-        self.assertEqual('value1', profile.properties['key1'])
-        self.assertEqual(2, profile.properties['key2'])
-        self.assertEqual({'foo': 'bar'}, profile.context)
-
-        self.assertIsNone(profile._computeclient)
-        self.assertIsNone(profile._networkclient)
-        self.assertIsNone(profile._orchestrationclient)
-        self.assertIsNone(profile._block_storageclient)
-
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_init_version_as_float(self, mock_creds):
-        mock_creds.return_value = {'foo': 'bar'}
-        self.spec['version'] = 1.1
-        profile = self._create_profile('test-profile')
-
-        self.assertIsNone(profile.id)
-        self.assertEqual('test-profile', profile.name)
-        self.assertEqual(self.spec, profile.spec)
-        self.assertEqual('os.dummy', profile.type_name)
-        self.assertEqual('1.1', profile.version)
-        self.assertEqual('os.dummy-1.1', profile.type)
-        self.assertEqual(self.ctx.user_id, profile.user)
-        self.assertEqual(self.ctx.project_id, profile.project)
-        self.assertEqual(self.ctx.domain_id, profile.domain)
-        self.assertEqual({}, profile.metadata)
-        self.assertIsNone(profile.created_at)
-        self.assertIsNone(profile.updated_at)
-
-        spec_data = profile.spec_data
-        self.assertEqual('os.dummy', spec_data['type'])
-        self.assertEqual('1.1', spec_data['version'])
-        self.assertEqual('value1', spec_data['properties']['key1'])
-        self.assertEqual(2, spec_data['properties']['key2'])
-        self.assertEqual('value1', profile.properties['key1'])
-        self.assertEqual(2, profile.properties['key2'])
-        self.assertEqual({'foo': 'bar'}, profile.context)
-
-        self.assertIsNone(profile._computeclient)
-        self.assertIsNone(profile._networkclient)
-        self.assertIsNone(profile._orchestrationclient)
-        self.assertIsNone(profile._block_storageclient)
-
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_init_version_as_string(self, mock_creds):
-        mock_creds.return_value = {'foo': 'bar'}
-        self.spec['version'] = '1.1'
-        profile = self._create_profile('test-profile')
-
-        self.assertIsNone(profile.id)
-        self.assertEqual('test-profile', profile.name)
-        self.assertEqual(self.spec, profile.spec)
-        self.assertEqual('os.dummy', profile.type_name)
-        self.assertEqual('1.1', profile.version)
-        self.assertEqual('os.dummy-1.1', profile.type)
-        self.assertEqual(self.ctx.user_id, profile.user)
-        self.assertEqual(self.ctx.project_id, profile.project)
-        self.assertEqual(self.ctx.domain_id, profile.domain)
-        self.assertEqual({}, profile.metadata)
-        self.assertIsNone(profile.created_at)
-        self.assertIsNone(profile.updated_at)
-
-        spec_data = profile.spec_data
-        self.assertEqual('os.dummy', spec_data['type'])
-        self.assertEqual('1.1', spec_data['version'])
-        self.assertEqual('value1', spec_data['properties']['key1'])
-        self.assertEqual(2, spec_data['properties']['key2'])
-        self.assertEqual('value1', profile.properties['key1'])
-        self.assertEqual(2, profile.properties['key2'])
-        self.assertEqual({'foo': 'bar'}, profile.context)
-
-        self.assertIsNone(profile._computeclient)
-        self.assertIsNone(profile._networkclient)
-        self.assertIsNone(profile._orchestrationclient)
-        self.assertIsNone(profile._block_storageclient)
-
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_init_with_context(self, mock_creds):
-        mock_creds.return_value = {'foo': 'bar'}
-        profile = self._create_profile('test-profile',
-                                       pid='FAKE_ID', context={'bar': 'foo'})
-        self.assertEqual({'bar': 'foo'}, profile.context)
-
-    def test_init_bad_type(self):
-        bad_spec = {
-            'type': 'bad-type',
-            'version': '1.0',
-            'properties': '',
-        }
-
-        self.assertRaises(exception.ResourceNotFound,
-                          pb.Profile,
-                          'test-profile', bad_spec)
-
-    def test_init_validation_error(self):
-        bad_spec = copy.deepcopy(self.spec)
-        del bad_spec['version']
-
-        ex = self.assertRaises(exception.ESchema,
-                               pb.Profile, 'test-profile', bad_spec)
-        self.assertEqual("The 'version' key is missing from the provided "
-                         "spec map.", str(ex))
-
-    def test_from_object(self):
-        obj = self._create_profile('test_profile_for_record')
-        obj.store(self.ctx)
-        profile = po.Profile.get(self.ctx, obj.id)
-
-        result = pb.Profile._from_object(profile)
-
-        self.assertEqual(profile.id, result.id)
-        self.assertEqual(profile.name, result.name)
-        self.assertEqual(profile.type, result.type)
-        self.assertEqual(profile.user, result.user)
-        self.assertEqual(profile.project, result.project)
-        self.assertEqual(profile.domain, result.domain)
-        self.assertEqual(profile.spec, result.spec)
-        self.assertEqual(profile.metadata, result.metadata)
-        self.assertEqual('value1', result.properties['key1'])
-        self.assertEqual(2, result.properties['key2'])
-
-        self.assertEqual(profile.created_at, result.created_at)
-        self.assertEqual(profile.updated_at, result.updated_at)
-        self.assertEqual(profile.context, result.context)
-
-    def test_load_with_profile(self):
-        obj = self._create_profile('test-profile-bb')
-        profile_id = obj.store(self.ctx)
-        profile = po.Profile.get(self.ctx, profile_id)
-
-        result = pb.Profile.load(self.ctx, profile=profile)
-
-        self.assertEqual(profile.id, result.id)
-
-    def test_load_with_profile_id(self):
-        obj = self._create_profile('test-profile-cc')
-        profile_id = obj.store(self.ctx)
-
-        result = pb.Profile.load(self.ctx, profile_id=profile_id)
-
-        self.assertEqual(obj.id, result.id)
-
-    def test_load_with_both(self):
-        profile = self._create_profile('test1')
-        profile.store(self.ctx)
-        db_profile = po.Profile.get(self.ctx, profile.id)
-
-        res = pb.Profile.load(self.ctx, profile=db_profile,
-                              profile_id=profile.id)
-
-        self.assertEqual(profile.id, res.id)
-
-    @mock.patch.object(po.Profile, 'get')
-    def test_load_not_found(self, mock_get):
-        mock_get.return_value = None
-        self.assertRaises(exception.ResourceNotFound,
-                          pb.Profile.load,
-                          self.ctx, profile_id='FAKE_ID')
-        mock_get.assert_called_once_with(self.ctx, 'FAKE_ID',
-                                         project_safe=True)
-
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_create(self, mock_creds):
-        mock_creds.return_value = {}
-        res = pb.Profile.create(self.ctx, 'my_profile', self.spec)
-
-        self.assertIsInstance(res, pb.Profile)
-
-        obj = po.Profile.get(self.ctx, res.id)
-        self.assertEqual('my_profile', obj.name)
-
-    def test_create_profile_type_not_found(self):
-        spec = copy.deepcopy(self.spec)
-        spec['type'] = "bogus"
-        ex = self.assertRaises(exception.InvalidSpec,
-                               pb.Profile.create,
-                               self.ctx, 'my_profile', spec)
-
-        self.assertEqual("Failed in creating profile my_profile: The "
-                         "profile_type 'bogus-1.0' could not be found.",
-                         str(ex))
-
-    @mock.patch.object(pb.Profile, 'validate')
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_create_failed_validation(self, mock_creds, mock_validate):
-        mock_creds.return_value = {}
-        mock_validate.side_effect = exception.ESchema(message="Boom")
-
-        ex = self.assertRaises(exception.InvalidSpec,
-                               pb.Profile.create,
-                               self.ctx, 'my_profile', self.spec)
-
-        self.assertEqual("Failed in creating profile my_profile: "
-                         "Boom", str(ex))
-
-    @mock.patch.object(po.Profile, 'delete')
-    def test_delete(self, mock_delete):
-        res = pb.Profile.delete(self.ctx, 'FAKE_ID')
-        self.assertIsNone(res)
-        mock_delete.assert_called_once_with(self.ctx, 'FAKE_ID')
-
-    @mock.patch.object(po.Profile, 'delete')
-    def test_delete_busy(self, mock_delete):
-        err = exception.EResourceBusy(type='profile', id='FAKE_ID')
-        mock_delete.side_effect = err
-        self.assertRaises(exception.EResourceBusy,
-                          pb.Profile.delete,
-                          self.ctx, 'FAKE_ID')
-        mock_delete.assert_called_once_with(self.ctx, 'FAKE_ID')
-
-    @mock.patch.object(po.Profile, 'delete')
-    def test_delete_not_found(self, mock_delete):
-        mock_delete.return_value = None
-        result = pb.Profile.delete(self.ctx, 'BOGUS')
-        self.assertIsNone(result)
-        mock_delete.assert_called_once_with(self.ctx, 'BOGUS')
-
-    @mock.patch.object(po.Profile, 'create')
-    def test_store_for_create(self, mock_create):
-        profile = self._create_profile('test-profile')
-        self.assertIsNone(profile.id)
-        self.assertIsNone(profile.created_at)
-
-        mock_create.return_value = mock.Mock(id='FAKE_ID')
-
-        profile_id = profile.store(self.ctx)
-
-        mock_create.assert_called_once_with(
-            self.ctx,
-            {
-                'name': profile.name,
-                'type': profile.type,
-                'context': profile.context,
-                'spec': profile.spec,
-                'user': profile.user,
-                'project': profile.project,
-                'domain': profile.domain,
-                'meta_data': profile.metadata,
-                'created_at': mock.ANY,
-            }
-        )
-        self.assertEqual('FAKE_ID', profile_id)
-        self.assertIsNotNone(profile.created_at)
-
-    @mock.patch.object(po.Profile, 'update')
-    def test_store_for_update(self, mock_update):
-        profile = self._create_profile('test-profile')
-        self.assertIsNone(profile.id)
-        self.assertIsNone(profile.updated_at)
-        profile.id = 'FAKE_ID'
-
-        profile_id = profile.store(self.ctx)
-        self.assertEqual('FAKE_ID', profile.id)
-        mock_update.assert_called_once_with(
-            self.ctx,
-            'FAKE_ID',
-            {
-                'name': profile.name,
-                'type': profile.type,
-                'context': profile.context,
-                'spec': profile.spec,
-                'user': profile.user,
-                'project': profile.project,
-                'domain': profile.domain,
-                'meta_data': profile.metadata,
-                'updated_at': mock.ANY,
-            }
-        )
-        self.assertIsNotNone(profile.updated_at)
-        self.assertEqual('FAKE_ID', profile_id)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_create_object(self, mock_load):
-        profile = mock.Mock()
-        mock_load.return_value = profile
-        obj = mock.Mock()
-        obj.profile_id = 'FAKE_ID'
-
-        res = pb.Profile.create_object(self.ctx, obj)
-
-        mock_load.assert_called_once_with(self.ctx, profile_id='FAKE_ID')
-        profile.do_create.assert_called_once_with(obj)
-        res_obj = profile.do_create.return_value
-        self.assertEqual(res_obj, res)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_check_object(self, mock_load):
-        profile = mock.Mock()
-        mock_load.return_value = profile
-        obj = mock.Mock()
-        obj.profile_id = 'FAKE_ID'
-
-        res = pb.Profile.check_object(self.ctx, obj)
-
-        mock_load.assert_called_once_with(self.ctx, profile_id='FAKE_ID')
-        profile.do_check.assert_called_once_with(obj)
-        res_obj = profile.do_check.return_value
-        self.assertEqual(res_obj, res)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_delete_object(self, mock_load):
-        profile = mock.Mock()
-        mock_load.return_value = profile
-        obj = mock.Mock()
-        obj.profile_id = 'FAKE_ID'
-
-        res = pb.Profile.delete_object(self.ctx, obj)
-
-        mock_load.assert_called_once_with(self.ctx, profile_id='FAKE_ID')
-        profile.do_delete.assert_called_once_with(obj)
-        res_obj = profile.do_delete.return_value
-        self.assertEqual(res_obj, res)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_check_object_exception_return_value(self, mock_load):
-        profile = pb.Profile
-        profile.load(self.ctx).do_check = mock.Mock(
-            side_effect=exception.InternalError(code=400, message='BAD'))
-        obj = mock_load
-
-        self.assertRaises(exception.InternalError, profile.check_object,
-                          self.ctx, obj)
-
-        profile.load(self.ctx).do_check.assert_called_once_with(obj)
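The store tests above match volatile fields such as `created_at` and `updated_at` with `mock.ANY`, a sentinel that compares equal to anything. A minimal sketch of why that works even inside nested structures:

```python
from unittest import mock

db = mock.Mock()
ctx = object()
db.create(ctx, {'name': 'p1', 'created_at': '2024-05-10T00:00:00'})

# mock.ANY.__eq__ always returns True, so the dict comparison succeeds
# regardless of the concrete timestamp recorded at call time.
db.create.assert_called_once_with(ctx, {'name': 'p1', 'created_at': mock.ANY})
```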
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_update_object_with_profile(self, mock_load):
-        old_profile = mock.Mock()
-        new_profile = mock.Mock()
-
-        mock_load.side_effect = [old_profile, new_profile]
-        obj = mock.Mock()
-        obj.profile_id = 'OLD_ID'
-
-        res = pb.Profile.update_object(self.ctx, obj,
-                                       new_profile_id='NEW_ID', foo='bar')
-
-        mock_load.assert_has_calls([
-            mock.call(self.ctx, profile_id='OLD_ID'),
-            mock.call(self.ctx, profile_id='NEW_ID'),
-        ])
-
-        old_profile.do_update.assert_called_once_with(obj, new_profile,
-                                                      foo='bar')
-        res_obj = old_profile.do_update.return_value
-        self.assertEqual(res_obj, res)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_update_object_without_profile(self, mock_load):
-        profile = mock.Mock()
-
-        mock_load.return_value = profile
-        obj = mock.Mock()
-        obj.profile_id = 'FAKE_ID'
-
-        res = pb.Profile.update_object(self.ctx, obj, foo='bar', zoo='car')
-
-        mock_load.assert_called_once_with(self.ctx, profile_id='FAKE_ID')
-        profile.do_update.assert_called_once_with(obj, None,
-                                                  foo='bar', zoo='car')
-        res_obj = profile.do_update.return_value
-        self.assertEqual(res_obj, res)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_recover_object(self, mock_load):
-        profile = mock.Mock()
-        mock_load.return_value = profile
-        obj = mock.Mock()
-        obj.profile_id = 'FAKE_ID'
-
-        res = pb.Profile.recover_object(self.ctx, obj, foo='bar', zoo='car')
-
-        mock_load.assert_called_once_with(self.ctx, profile_id='FAKE_ID')
-        profile.do_recover.assert_called_once_with(obj, foo='bar', zoo='car')
-        res_obj = profile.do_recover.return_value
-        self.assertEqual(res_obj, res)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_get_details(self, mock_load):
-        profile = mock.Mock()
-        mock_load.return_value = profile
-        obj = mock.Mock()
-        obj.profile_id = 'FAKE_ID'
-
-        res = pb.Profile.get_details(self.ctx, obj)
-
-        mock_load.assert_called_once_with(self.ctx, profile_id='FAKE_ID')
-        profile.do_get_details.assert_called_once_with(obj)
-        res_obj = profile.do_get_details.return_value
-        self.assertEqual(res_obj, res)
-
-    def test_get_schema(self):
-        expected = {
-            'context': {
-                'description': 'context data',
-                'required': False,
-                'updatable': False,
-                'type': 'Map'
-            },
-            'key1': {
-                'default': 'value1',
-                'description': 'first key',
-                'required': False,
-                'updatable': True,
-                'type': 'String',
-            },
-            'key2': {
-                'description': 'second key',
-                'required': True,
-                'updatable': True,
-                'type': 'Integer'
-            },
-            'key3': {
-                'description': 'third key',
-                'required': False,
-                'updatable': False,
-                'type': 'String'
-            },
-        }
-
-        actual = DummyProfile.get_schema()
-        self.assertEqual(expected, actual)
-
-    def test_get_ops(self):
-        expected = {
-            'op1': {
-                'description': 'Operation 1',
-                'parameters': {
-                    'param1': {
-                        'type': 'String',
-                        'required': False,
-                        'description': 'description of param1',
-                    }
-                }
-            },
-        }
-
-        actual = DummyProfile.get_ops()
-        self.assertEqual(expected, actual)
-
-    @mock.patch.object(nova_server.ServerProfile, 'do_adopt')
-    def test_adopt_node(self, mock_adopt):
-        obj = mock.Mock()
-
-        res = pb.Profile.adopt_node(self.ctx, obj, "os.nova.server-1.0",
-                                    overrides=None, snapshot=False)
-
-        mock_adopt.assert_called_once_with(obj, overrides=None, snapshot=False)
-        res_obj = mock_adopt.return_value
-        self.assertEqual(res_obj, res)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_join_cluster(self, mock_load):
-        profile = mock.Mock()
-        mock_load.return_value = profile
-        obj = mock.Mock()
-        obj.profile_id = 'FAKE_ID'
-
-        res = pb.Profile.join_cluster(self.ctx, obj, 'CLUSTER_ID')
-
-        mock_load.assert_called_once_with(self.ctx, profile_id='FAKE_ID')
-        profile.do_join.assert_called_once_with(obj, 'CLUSTER_ID')
-        res_obj = profile.do_join.return_value
-        self.assertEqual(res_obj, res)
-
-    @mock.patch.object(pb.Profile, 'load')
-    def test_leave_cluster(self, mock_load):
-        profile = mock.Mock()
-        mock_load.return_value = profile
-        obj = mock.Mock()
-        obj.profile_id = 'FAKE_ID'
-
-        res = pb.Profile.leave_cluster(self.ctx, obj)
-
-        mock_load.assert_called_once_with(self.ctx, profile_id='FAKE_ID')
-        profile.do_leave.assert_called_once_with(obj)
-        res_obj = profile.do_leave.return_value
-        self.assertEqual(res_obj, res)
-
-    def test_validate_without_properties(self):
-        profile = self._create_profile('test_profile')
-
-        profile.do_validate = mock.Mock()
-
-        profile.validate()
-
-        profile.do_validate.assert_not_called()
-
-    def test_validate_with_properties(self):
-        profile = self._create_profile('test_profile')
-
-        profile.do_validate = mock.Mock()
-
-        profile.validate(validate_props=True)
-
-        profile.do_validate.assert_called_once_with(obj=profile)
-
-    def test_validate_bad_context(self):
-        spec = {
-            "type": "os.dummy",
-            "version": "1.0",
-            "properties": {
-                "context": {
-                    "foo": "bar"
-                },
-                "key1": "value1",
-                "key2": 2,
-            }
-        }
-        profile = DummyProfile("p-bad-ctx", spec, user=self.ctx.user_id,
-                               project=self.ctx.project_id,
-                               domain=self.ctx.domain_id)
-
-        self.assertRaises(exception.ESchema, profile.validate)
-
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_init_context(self, mock_creds):
-        fake_ctx = mock.Mock()
-        mock_creds.return_value = fake_ctx
-
-        # _init_context() is called from __init__
-        self._create_profile('test-profile')
-
-        # cannot determine the result in this case; we only check whether
-        # the keys were popped or not
-        fake_ctx.pop.assert_has_calls([
-            mock.call('project_name', None),
-            mock.call('project_domain_name', None),
-        ])
-        mock_creds.assert_called_once_with()
-
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_init_context_for_real(self, mock_creds):
-        fake_ctx = {
-            'project_name': 'this project',
-            'project_domain_name': 'this domain',
-            'auth_url': 'some url',
-            'user_id': 'fake_user',
-            'foo': 'bar',
-        }
-        mock_creds.return_value = fake_ctx
-
-        # _init_context() is called from __init__
-        profile = self._create_profile('test-profile')
-
-        mock_creds.assert_called_once_with()
-        expected = {
-            'auth_url': 'some url',
-            'user_id': 'fake_user',
-            'foo': 'bar',
-        }
-        self.assertEqual(expected, profile.context)
-
-    @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test_init_context_for_real_with_data(self, mock_creds):
-        fake_ctx = {
-            'project_name': 'this project',
-            'project_domain_name': 'this domain',
-            'auth_url': 'some url',
-            'user_id': 'fake_user',
-            'foo': 'bar',
-        }
-        mock_creds.return_value = fake_ctx
-        self.spec['properties']['context'] = {
-            'region_name': 'region_dist'
-        }
-
-        # _init_context() is called from __init__
-        profile = self._create_profile('test-profile')
-
-        mock_creds.assert_called_once_with(region_name='region_dist')
-        expected = {
-            'auth_url': 'some url',
-            'user_id': 'fake_user',
-            'foo': 'bar',
-        }
-        self.assertEqual(expected, profile.context)
-
-    @mock.patch.object(co.Credential, 'get')
-    @mock.patch.object(oslo_ctx, 'get_current')
-    def test_build_conn_params(self, mock_current, mock_get):
-        profile = self._create_profile('test-profile')
-        profile.context = {'foo': 'bar'}
-        fake_cred = mock.Mock(cred={'openstack': {'trust': 'TRUST_ID'}})
-        mock_get.return_value = fake_cred
-        fake_ctx = mock.Mock()
-        mock_current.return_value = fake_ctx
-
-        user = 'FAKE_USER'
-        project = 'FAKE_PROJECT'
-
-        res = profile._build_conn_params(user, project)
-        expected = {
-            'foo': 'bar',
-            'trust_id': 'TRUST_ID',
-        }
-        self.assertEqual(expected, res)
-        mock_current.assert_called_once_with()
-        mock_get.assert_called_once_with(fake_ctx, 'FAKE_USER', 'FAKE_PROJECT')
-
-    @mock.patch.object(co.Credential, 'get')
-    @mock.patch.object(oslo_ctx, 'get_current')
-    def test_build_conn_params_trust_not_found(self, mock_current, mock_get):
-        profile = self._create_profile('test-profile')
-        mock_get.return_value = None
-        fake_ctx = mock.Mock()
-        mock_current.return_value = fake_ctx
-
-        self.assertRaises(exception.TrustNotFound,
-                          profile._build_conn_params,
-                          'FAKE_USER', 'FAKE_PROJECT')
-
-        mock_current.assert_called_once_with()
-        mock_get.assert_called_once_with(fake_ctx, 'FAKE_USER', 'FAKE_PROJECT')
-
-    @mock.patch.object(pb.Profile, '_build_conn_params')
-    @mock.patch("senlin.drivers.base.SenlinDriver")
-    def test_compute(self, mock_senlindriver, mock_params):
-        obj = mock.Mock()
-        sd = mock.Mock()
-        cc = mock.Mock()
-        sd.compute.return_value = cc
-        mock_senlindriver.return_value = sd
-        fake_params = mock.Mock()
-        mock_params.return_value = fake_params
-        profile = self._create_profile('test-profile')
-
-        res = profile.compute(obj)
-
-        self.assertEqual(cc, res)
-        self.assertEqual(cc, profile._computeclient)
-        mock_params.assert_called_once_with(obj.user, obj.project)
-        sd.compute.assert_called_once_with(fake_params)
-
-    def test_compute_with_cache(self):
-        cc = mock.Mock()
-        profile = self._create_profile('test-profile')
-        profile._computeclient = cc
-
-        res = profile.compute(mock.Mock())
-
-        self.assertEqual(cc, res)
-
-    @mock.patch.object(pb.Profile, '_build_conn_params')
-    @mock.patch("senlin.drivers.base.SenlinDriver")
-    def test_glance_client(self, mock_senlindriver, mock_params):
-        obj = mock.Mock()
-        sd = mock.Mock()
-        gc = mock.Mock()
-        sd.glance.return_value = gc
-        mock_senlindriver.return_value = sd
-        fake_params = mock.Mock()
-        mock_params.return_value = fake_params
-        profile = self._create_profile('test-profile')
-
-        res = profile.glance(obj)
-
-        self.assertEqual(gc, res)
-        self.assertEqual(gc, profile._glanceclient)
-        mock_params.assert_called_once_with(obj.user, obj.project)
-        sd.glance.assert_called_once_with(fake_params)
-
-    @mock.patch.object(pb.Profile, '_build_conn_params')
-    @mock.patch("senlin.drivers.base.SenlinDriver")
-    def test_neutron_client(self, mock_senlindriver, mock_params):
-        obj = mock.Mock()
-        sd = mock.Mock()
-        nc = mock.Mock()
-        sd.network.return_value = nc
-        mock_senlindriver.return_value = sd
-        fake_params = mock.Mock()
-        mock_params.return_value = fake_params
-        profile = self._create_profile('test-profile')
-
-        res = profile.network(obj)
-
-        self.assertEqual(nc, res)
-        self.assertEqual(nc, profile._networkclient)
-        mock_params.assert_called_once_with(obj.user, obj.project)
-        sd.network.assert_called_once_with(fake_params)
-
-    @mock.patch.object(pb.Profile, '_build_conn_params')
-    @mock.patch("senlin.drivers.base.SenlinDriver")
-    def test_cinder_client(self, mock_senlindriver, mock_params):
-        obj = mock.Mock()
-        sd = mock.Mock()
-        nc = mock.Mock()
-        sd.block_storage.return_value = nc
-        mock_senlindriver.return_value = sd
-        fake_params = mock.Mock()
-        mock_params.return_value = fake_params
-        profile = self._create_profile('test-profile')
-
-        res = profile.block_storage(obj)
-
-        self.assertEqual(nc, res)
-        self.assertEqual(nc, profile._block_storageclient)
-        mock_params.assert_called_once_with(obj.user, obj.project)
-        sd.block_storage.assert_called_once_with(fake_params)
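The `test_compute` / `test_compute_with_cache` pair above pins down a lazy-caching pattern: the first call builds a driver client from connection params and memoizes it on the profile; later calls skip the driver entirely. A small sketch of the pattern under test (illustrative class and attribute names, not the removed implementation):

```python
class ProfileClients:
    """Illustrative memoized-client holder, mirroring Profile.compute()."""

    def __init__(self, driver):
        self._driver = driver
        self._computeclient = None

    def compute(self, obj):
        # cached client short-circuits driver construction entirely
        if self._computeclient is not None:
            return self._computeclient
        params = self._build_conn_params(obj)
        self._computeclient = self._driver.compute(params)
        return self._computeclient

    def _build_conn_params(self, obj):
        # stand-in for the trust-based parameter building tested earlier
        return {'user': getattr(obj, 'user', None)}
```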
self.assertRaises(NotImplementedError, profile.do_create, mock.Mock()) - self.assertRaises(NotImplementedError, profile.do_delete, mock.Mock()) - self.assertTrue(profile.do_update(mock.Mock(), mock.Mock())) - self.assertTrue(profile.do_check(mock.Mock())) - self.assertEqual({}, profile.do_get_details(mock.Mock())) - self.assertTrue(profile.do_join(mock.Mock(), mock.Mock())) - self.assertTrue(profile.do_leave(mock.Mock())) - self.assertTrue(profile.do_validate(mock.Mock())) - - def test_do_recover_default(self): - profile = self._create_profile('test-profile') - self.patchobject(profile, 'do_create', return_value=True) - self.patchobject(profile, 'do_delete', return_value=True) - - res, status = profile.do_recover(mock.Mock(), - operation=consts.RECOVER_RECREATE) - self.assertTrue(status) - - res, status = profile.do_recover( - mock.Mock(), operation='bar') - self.assertFalse(status) - - def test_do_recover_with_fencing(self): - profile = self._create_profile('test-profile') - self.patchobject(profile, 'do_create', return_value=True) - self.patchobject(profile, 'do_delete', return_value=True) - obj = mock.Mock() - - res = profile.do_recover(obj, ignore_missing=True, - params={"fence_compute": True}, - operation=consts.RECOVER_RECREATE) - - self.assertTrue(res) - profile.do_delete.assert_called_once_with(obj, force=False, - timeout=None) - profile.do_create.assert_called_once_with(obj) - - def test_do_recover_with_delete_timeout(self): - profile = self._create_profile('test-profile') - self.patchobject(profile, 'do_create', return_value=True) - self.patchobject(profile, 'do_delete', return_value=True) - obj = mock.Mock() - - res = profile.do_recover(obj, ignore_missing=True, delete_timeout=5, - operation=consts.RECOVER_RECREATE) - - self.assertTrue(res) - profile.do_delete.assert_called_once_with(obj, force=False, - timeout=5) - profile.do_create.assert_called_once_with(obj) - - def test_do_recover_with_force_recreate(self): - profile = self._create_profile('test-profile') - self.patchobject(profile, 'do_create', return_value=True) - self.patchobject(profile, 'do_delete', return_value=True) - obj = mock.Mock() - - res = profile.do_recover(obj, ignore_missing=True, force_recreate=True, - operation=consts.RECOVER_RECREATE) - - self.assertTrue(res) - profile.do_delete.assert_called_once_with(obj, force=False, - timeout=None) - profile.do_create.assert_called_once_with(obj) - - def test_do_recover_with_force_recreate_failed_delete(self): - profile = self._create_profile('test-profile') - self.patchobject(profile, 'do_create', return_value=True) - err = exception.EResourceDeletion(type='STACK', id='ID', - message='BANG') - self.patchobject(profile, 'do_delete', side_effect=err) - obj = mock.Mock() - - res = profile.do_recover(obj, ignore_missing=True, force_recreate=True, - operation=consts.RECOVER_RECREATE) - self.assertTrue(res) - profile.do_delete.assert_called_once_with(obj, force=False, - timeout=None) - profile.do_create.assert_called_once_with(obj) - - def test_do_recover_with_false_force_recreate_failed_delete(self): - profile = self._create_profile('test-profile') - err = exception.EResourceDeletion(type='STACK', id='ID', - message='BANG') - self.patchobject(profile, 'do_delete', side_effect=err) - operation = "RECREATE" - - ex = self.assertRaises(exception.EResourceOperation, - profile.do_recover, - mock.Mock(id='NODE_ID'), operation=operation, - force_recreate=False) - self.assertEqual("Failed in recovering node 'NODE_ID': " - "Failed in deleting STACK 'ID': BANG.", - str(ex)) - - def 
test_do_recover_with_recreate_succeeded(self): - profile = self._create_profile('test-profile') - self.patchobject(profile, 'do_delete', return_value=True) - self.patchobject(profile, 'do_create', return_value=True) - operation = "RECREATE" - res = profile.do_recover(mock.Mock(), operation=operation) - - self.assertTrue(res) - - def test_do_recover_with_recreate_failed_delete(self): - profile = self._create_profile('test-profile') - err = exception.EResourceDeletion(type='STACK', id='ID', - message='BANG') - self.patchobject(profile, 'do_delete', side_effect=err) - operation = "RECREATE" - - ex = self.assertRaises(exception.EResourceOperation, - profile.do_recover, - mock.Mock(id='NODE_ID'), operation=operation) - self.assertEqual("Failed in recovering node 'NODE_ID': " - "Failed in deleting STACK 'ID': BANG.", - str(ex)) - - def test_do_recover_with_recreate_failed_create(self): - profile = self._create_profile('test-profile') - self.patchobject(profile, 'do_delete', return_value=True) - err = exception.EResourceCreation(type='STACK', message='BANNG') - self.patchobject(profile, 'do_create', side_effect=err) - operation = "RECREATE" - - ex = self.assertRaises(exception.EResourceOperation, - profile.do_recover, - mock.Mock(id='NODE_ID'), operation=operation) - - msg = ("Failed in recovering node 'NODE_ID': Failed in creating " - "STACK: BANNG.") - self.assertEqual(msg, str(ex)) - - def test_to_dict(self): - profile = self._create_profile('test-profile') - # simulate a store() - profile.id = 'FAKE_ID' - expected = { - 'id': 'FAKE_ID', - 'name': profile.name, - 'type': profile.type, - 'user': profile.user, - 'project': profile.project, - 'domain': profile.domain, - 'spec': profile.spec, - 'metadata': profile.metadata, - 'created_at': common_utils.isotime(profile.created_at), - 'updated_at': None, - } - - result = profile.to_dict() - self.assertEqual(expected, result) - - def test_validate_for_update_succeeded(self): - profile = self._create_profile('test-profile') - - # Properties are updatable - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['key1'] = 'new_v1' - new_spec['properties']['key2'] = 3 - new_profile = pb.Profile('new-profile', new_spec, - user=self.ctx.user_id, - project=self.ctx.project_id, - domain=self.ctx.domain_id, - context=None) - res = profile.validate_for_update(new_profile) - self.assertTrue(res) - - def test_validate_for_update_failed(self): - profile = self._create_profile('test-profile') - - # Property is not updatable - new_spec = copy.deepcopy(self.spec) - new_spec['properties']['key3'] = 'new_v3' - new_profile = pb.Profile('new-profile', new_spec, - user=self.ctx.user_id, - project=self.ctx.project_id, - domain=self.ctx.domain_id, - context=None) - - res = profile.validate_for_update(new_profile) - self.assertFalse(res) diff --git a/senlin/tests/unit/test_common_constraints.py b/senlin/tests/unit/test_common_constraints.py deleted file mode 100644 index 582a71780..000000000 --- a/senlin/tests/unit/test_common_constraints.py +++ /dev/null @@ -1,239 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -import testtools - -from senlin.common import constraints -from senlin.common import exception as exc -from senlin.common import schema - - -class TestConstraintsSchema(testtools.TestCase): - def test_allowed_values(self): - d = { - 'constraint': ['foo', 'bar'], - 'type': 'AllowedValues' - } - r = constraints.AllowedValues(['foo', 'bar']) - - self.assertEqual(d, dict(r)) - - def test_allowed_values_numeric_int(self): - """Test AllowedValues constraint for numeric integer values. - - Test if the AllowedValues constraint works for numeric values in any - combination of numeric strings or numbers in the constraint and - numeric strings or numbers as value. - """ - - # Allowed values defined as integer numbers - s = schema.Integer( - constraints=[constraints.AllowedValues([1, 2, 4])] - ) - # ... and value as number or string - self.assertIsNone(s.validate(1)) - - err = self.assertRaises(exc.ESchema, s.validate, 3) - self.assertEqual("'3' must be one of the allowed values: 1, 2, 4", - str(err)) - - self.assertIsNone(s.validate('1')) - err = self.assertRaises(exc.ESchema, s.validate, '3') - self.assertEqual("'3' must be one of the allowed values: 1, 2, 4", - str(err)) - - # Allowed values defined as integer strings - s = schema.Integer( - constraints=[constraints.AllowedValues(['1', '2', '4'])] - ) - # ... and value as number or string - self.assertIsNone(s.validate(1)) - err = self.assertRaises(exc.ESchema, s.validate, 3) - self.assertEqual("'3' must be one of the allowed values: 1, 2, 4", - str(err)) - - self.assertIsNone(s.validate('1')) - err = self.assertRaises(exc.ESchema, s.validate, '3') - self.assertEqual("'3' must be one of the allowed values: 1, 2, 4", - str(err)) - - def test_allowed_values_numeric_float(self): - """Test AllowedValues constraint for numeric floating point values. - - Test if the AllowedValues constraint works for numeric values in any - combination of numeric strings or numbers in the constraint and - numeric strings or numbers as value. - """ - - # Allowed values defined as numbers - s = schema.Number( - constraints=[constraints.AllowedValues([1.1, 2.2, 4.4])] - ) - # ... and value as number or string - self.assertIsNone(s.validate_constraints(1.1)) - err = self.assertRaises(exc.ESchema, s.validate_constraints, 3.3) - self.assertEqual("'3.3' must be one of the allowed values: " - "1.1, 2.2, 4.4", str(err)) - self.assertIsNone(s.validate_constraints('1.1', s)) - err = self.assertRaises(exc.ESchema, s.validate_constraints, '3.3') - self.assertEqual("'3.3' must be one of the allowed values: " - "1.1, 2.2, 4.4", str(err)) - - # Allowed values defined as strings - s = schema.Number( - constraints=[constraints.AllowedValues(['1.1', '2.2', '4.4'])] - ) - # ... 
and value as number or string - self.assertIsNone(s.validate_constraints(1.1, s)) - err = self.assertRaises(exc.ESchema, s.validate_constraints, 3.3, s) - self.assertEqual("'3.3' must be one of the allowed values: " - "1.1, 2.2, 4.4", str(err)) - self.assertIsNone(s.validate_constraints('1.1', s)) - err = self.assertRaises(exc.ESchema, s.validate_constraints, '3.3', s) - self.assertEqual("'3.3' must be one of the allowed values: " - "1.1, 2.2, 4.4", str(err)) - - def test_schema_all(self): - d = { - 'type': 'String', - 'description': 'A string', - 'default': 'wibble', - 'required': True, - 'updatable': False, - 'constraints': [{ - 'constraint': ['foo', 'bar'], - 'type': 'AllowedValues' - }] - } - c = constraints.AllowedValues(['foo', 'bar']) - s = schema.String('A string', default='wibble', required=True, - constraints=[c]) - self.assertEqual(d, dict(s)) - - def test_schema_list_schema(self): - d = { - 'type': 'List', - 'description': 'A list', - 'schema': { - '*': { - 'type': 'String', - 'description': 'A string', - 'default': 'wibble', - 'required': True, - 'updatable': False, - 'constraints': [{ - 'constraint': ['foo', 'bar'], - 'type': 'AllowedValues' - }] - } - }, - 'required': False, - 'updatable': False, - } - c = constraints.AllowedValues(['foo', 'bar']) - s = schema.String('A string', default='wibble', required=True, - constraints=[c]) - li = schema.List('A list', schema=s) - self.assertEqual(d, dict(li)) - - def test_schema_map_schema(self): - d = { - 'type': 'Map', - 'description': 'A map', - 'schema': { - 'Foo': { - 'type': 'String', - 'description': 'A string', - 'default': 'wibble', - 'required': True, - 'updatable': False, - 'constraints': [{ - 'type': 'AllowedValues', - 'constraint': ['foo', 'bar'] - }] - } - }, - 'required': False, - 'updatable': False, - } - c = constraints.AllowedValues(['foo', 'bar']) - s = schema.String('A string', default='wibble', required=True, - constraints=[c]) - m = schema.Map('A map', schema={'Foo': s}) - self.assertEqual(d, dict(m)) - - def test_schema_nested_schema(self): - d = { - 'type': 'List', - 'description': 'A list', - 'schema': { - '*': { - 'type': 'Map', - 'description': 'A map', - 'schema': { - 'Foo': { - 'type': 'String', - 'description': 'A string', - 'default': 'wibble', - 'required': True, - 'updatable': False, - 'constraints': [{ - 'type': 'AllowedValues', - 'constraint': ['foo', 'bar'] - }] - } - }, - 'required': False, - 'updatable': False, - } - }, - 'required': False, - 'updatable': False, - } - c = constraints.AllowedValues(['foo', 'bar']) - s = schema.String('A string', default='wibble', required=True, - constraints=[c]) - m = schema.Map('A map', schema={'Foo': s}) - li = schema.List('A list', schema=m) - self.assertEqual(d, dict(li)) - - def test_schema_validate_good(self): - c = constraints.AllowedValues(['foo', 'bar']) - s = schema.String('A string', default='wibble', required=True, - constraints=[c]) - self.assertIsNone(s.validate('foo')) - - def test_schema_validate_fail(self): - c = constraints.AllowedValues(['foo', 'bar']) - s = schema.String('A string', default='wibble', required=True, - constraints=[c]) - err = self.assertRaises(exc.ESchema, s.validate, 'zoo') - self.assertIn("'zoo' must be one of the allowed values: foo, bar", - str(err)) - - def test_schema_nested_validate_good(self): - c = constraints.AllowedValues(['foo', 'bar']) - nested = schema.String('A string', default='wibble', required=True, - constraints=[c]) - s = schema.Map('A map', schema={'Foo': nested}) - self.assertIsNone(s.validate({'Foo': 
'foo'})) - - def test_schema_nested_validate_fail(self): - c = constraints.AllowedValues(['foo', 'bar']) - nested = schema.String('A string', default='wibble', required=True, - constraints=[c]) - s = schema.Map('A map', schema={'Foo': nested}) - err = self.assertRaises(exc.ESchema, s.validate, {'Foo': 'zoo'}) - - self.assertIn("'zoo' must be one of the allowed values: foo, bar", - str(err)) diff --git a/senlin/tests/unit/test_common_context.py b/senlin/tests/unit/test_common_context.py deleted file mode 100644 index 3ce088cd7..000000000 --- a/senlin/tests/unit/test_common_context.py +++ /dev/null @@ -1,74 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from senlin.common import context -from senlin.tests.unit.common import base - - -class TestRequestContext(base.SenlinTestCase): - - def setUp(self): - self.ctx = { - 'auth_url': 'http://xyz', - 'auth_token_info': {'123info': 'woop'}, - 'user_name': 'mick', - 'user_domain_name': 'user-domain-name', - 'project_id': 'project-id', - 'project_name': 'a project', - 'project_domain_name': 'a project domain', - 'domain_name': 'this domain', - 'trusts': None, - 'region_name': 'regionOne', - 'password': 'foo', - 'is_admin': False # needed for tests to work - } - - super(TestRequestContext, self).setUp() - - def test_request_context_init(self): - ctx = context.RequestContext( - auth_url=self.ctx.get('auth_url'), - auth_token_info=self.ctx.get('auth_token_info'), - user_name=self.ctx.get('user_name'), - user_domain_name=self.ctx.get('user_domain_name'), - project_id=self.ctx.get('project_id'), - project_name=self.ctx.get('project_name'), - project_domain_name=self.ctx.get('project_domain_name'), - domain_name=self.ctx.get('domain_name'), - trusts=self.ctx.get('trusts'), - region_name=self.ctx.get('region_name'), - password=self.ctx.get('password'), - is_admin=self.ctx.get('is_admin')) # needed for tests to work - - ctx_dict = ctx.to_dict() - for k, v in self.ctx.items(): - self.assertEqual(v, ctx_dict.get(k)) - - def test_request_context_from_dict(self): - ctx = context.RequestContext.from_dict(self.ctx) - - ctx_dict = ctx.to_dict() - for k, v in self.ctx.items(): - self.assertEqual(v, ctx_dict.get(k)) - - def test_request_context_update(self): - ctx = context.RequestContext.from_dict(self.ctx) - - for k in self.ctx: - self.assertEqual(self.ctx.get(k), ctx.to_dict().get(k)) - override = '%s_override' % k - setattr(ctx, k, override) - self.assertEqual(override, ctx.to_dict().get(k)) - - def test_get_admin_context(self): - ctx1 = context.get_admin_context() - self.assertTrue(ctx1.is_admin) diff --git a/senlin/tests/unit/test_common_exception.py b/senlin/tests/unit/test_common_exception.py deleted file mode 100644 index c5689a010..000000000 --- a/senlin/tests/unit/test_common_exception.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import fixtures - -from senlin.common import exception -from senlin.common.i18n import _ -from senlin.tests.unit.common import base - - -class TestException(exception.SenlinException): - msg_fmt = _("Testing message %(text)s") - - -class TestSenlinException(base.SenlinTestCase): - - def test_fatal_exception_error(self): - self.useFixture(fixtures.MonkeyPatch( - 'senlin.common.exception._FATAL_EXCEPTION_FORMAT_ERRORS', - True)) - self.assertRaises(KeyError, TestException) - - def test_format_string_error_message(self): - message = "This format %(message)s should work" - err = exception.Error(message) - self.assertEqual(message, str(err)) diff --git a/senlin/tests/unit/test_common_messaging.py b/senlin/tests/unit/test_common_messaging.py deleted file mode 100644 index 539b4b15a..000000000 --- a/senlin/tests/unit/test_common_messaging.py +++ /dev/null @@ -1,136 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -import oslo_messaging -import testtools - -from senlin.common import consts -from senlin.common import messaging - - -class TestUtilFunctions(testtools.TestCase): - - @mock.patch.object(oslo_messaging, "get_rpc_server") - @mock.patch("senlin.common.messaging.RequestContextSerializer") - @mock.patch("senlin.common.messaging.JsonPayloadSerializer") - def test_get_rpc_server(self, mock_json_serializer, - mock_context_serializer, - mock_get_rpc_server): - x_target = mock.Mock() - x_endpoint = mock.Mock() - x_json_serializer = mock.Mock() - mock_json_serializer.return_value = x_json_serializer - x_context_serializer = mock.Mock() - mock_context_serializer.return_value = x_context_serializer - x_rpc_server = mock.Mock() - mock_get_rpc_server.return_value = x_rpc_server - - res = messaging.get_rpc_server(x_target, x_endpoint) - - self.assertEqual(x_rpc_server, res) - mock_json_serializer.assert_called_once_with() - mock_context_serializer.assert_called_once_with(x_json_serializer) - mock_get_rpc_server.assert_called_once_with( - messaging.TRANSPORT, x_target, [x_endpoint], - executor='eventlet', serializer=x_context_serializer) - - @mock.patch.object(oslo_messaging, "get_rpc_server") - @mock.patch("senlin.common.messaging.RequestContextSerializer") - @mock.patch("senlin.common.messaging.JsonPayloadSerializer") - def test_get_rpc_server_with_serializer(self, mock_json_serializer, - mock_context_serializer, - mock_get_rpc_server): - x_target = mock.Mock() - x_endpoint = mock.Mock() - x_serializer = mock.Mock() - x_context_serializer = mock.Mock() - mock_context_serializer.return_value = x_context_serializer - x_rpc_server = mock.Mock() - mock_get_rpc_server.return_value = x_rpc_server - - res = messaging.get_rpc_server(x_target, x_endpoint, - serializer=x_serializer) - - self.assertEqual(x_rpc_server, res) - self.assertEqual(0, mock_json_serializer.call_count) - mock_context_serializer.assert_called_once_with(x_serializer) - mock_get_rpc_server.assert_called_once_with( - messaging.TRANSPORT, x_target, [x_endpoint], - executor='eventlet', serializer=x_context_serializer) - - @mock.patch("oslo_messaging.Target") - @mock.patch("senlin.common.messaging.RequestContextSerializer") - @mock.patch("senlin.common.messaging.JsonPayloadSerializer") - @mock.patch("oslo_messaging.get_rpc_client") - def test_get_rpc_client(self, mock_rpc_client, mock_json_serializer, - mock_context_serializer, mock_target): - topic = 'fake' - client = mock.Mock() - context = mock.Mock() - server = mock.Mock() - serializer = mock.Mock() - target = mock.Mock() - - mock_context_serializer.return_value = context - mock_json_serializer.return_value = serializer - mock_rpc_client.return_value = client - mock_target.return_value = target - - result = messaging.get_rpc_client(topic, server) - - mock_rpc_client.assert_called_once_with( - None, target, serializer=context - ) - mock_target.assert_called_once_with( - topic=topic, server=server, version=consts.RPC_API_VERSION_BASE - ) - mock_json_serializer.assert_called_once_with() - mock_context_serializer.assert_called_once_with(serializer) - - self.assertEqual(client, result) - - @mock.patch("oslo_messaging.Target") - @mock.patch("senlin.common.messaging.RequestContextSerializer") - @mock.patch("senlin.common.messaging.JsonPayloadSerializer") - @mock.patch("oslo_messaging.get_rpc_client") - def test_get_rpc_client_with_serializer(self, mock_rpc_client, - mock_json_serializer, - mock_context_serializer, - mock_target): - topic = 'fake' - client = mock.Mock() 
- context = mock.Mock() - custom_serializer = mock.Mock(name='custom') - server = mock.Mock() - target = mock.Mock() - - mock_context_serializer.return_value = context - mock_json_serializer.return_value = custom_serializer - mock_rpc_client.return_value = client - mock_target.return_value = target - - result = messaging.get_rpc_client( - topic, server, serializer=custom_serializer - ) - - mock_rpc_client.assert_called_once_with( - None, target, serializer=context - ) - mock_target.assert_called_once_with( - topic=topic, server=server, version=consts.RPC_API_VERSION_BASE - ) - mock_json_serializer.assert_not_called() - mock_context_serializer.assert_called_once_with(custom_serializer) - - self.assertEqual(client, result) diff --git a/senlin/tests/unit/test_common_policy.py b/senlin/tests/unit/test_common_policy.py deleted file mode 100644 index 77ef45151..000000000 --- a/senlin/tests/unit/test_common_policy.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from senlin.common import exception -from senlin.common import policy -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class PolicyEnforcerTest(base.SenlinTestCase): - - def setUp(self): - super(PolicyEnforcerTest, self).setUp() - - self.ctx = utils.dummy_context() - - @mock.patch.object(policy, '_get_enforcer') - def test_enforce(self, enforce): - mock_enforcer = mock.Mock() - mock_res = mock.Mock() - mock_enforcer.enforce.return_value = mock_res - enforce.return_value = mock_enforcer - target = mock.Mock() - - res = policy.enforce(self.ctx, 'RULE1', target, do_raise=True) - - self.assertEqual(res, mock_res) - enforce.assert_called_once_with() - mock_enforcer.enforce.assert_called_once_with( - 'RULE1', target, self.ctx.to_dict(), True, - exc=exception.Forbidden) diff --git a/senlin/tests/unit/test_common_scaleutils.py b/senlin/tests/unit/test_common_scaleutils.py deleted file mode 100644 index dd38af349..000000000 --- a/senlin/tests/unit/test_common_scaleutils.py +++ /dev/null @@ -1,426 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from oslo_config import cfg - -from senlin.common import consts -from senlin.common.i18n import _ -from senlin.common import scaleutils as su -from senlin.tests.unit.common import base - - -class ScaleUtilsTest(base.SenlinTestCase): - - def test_calculate_desired_exact(self): - # EXACT_CAPACITY - for i in range(10): - desired = self.getUniqueInteger() - res = su.calculate_desired(0, consts.EXACT_CAPACITY, desired, None) - self.assertEqual(desired, res) - - def test_calculate_desired_capacity(self): - # CHANGE_IN_CAPACITY - for i in range(10): - current = self.getUniqueInteger() - for j in range(10): - number = self.getUniqueInteger() - res = su.calculate_desired(current, consts.CHANGE_IN_CAPACITY, - number, None) - self.assertEqual(current + number, res) - - def test_calculate_desired_percentage_positive(self): - # CHANGE_IN_PERCENTAGE, positive - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 10, None) - self.assertEqual(11, res) - - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 15, None) - self.assertEqual(11, res) - - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 22, None) - self.assertEqual(12, res) - - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 1, None) - self.assertEqual(11, res) - - def test_calculate_desired_percentage_negative(self): - # CHANGE_IN_PERCENTAGE, negative - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -10, None) - self.assertEqual(9, res) - - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -15, None) - self.assertEqual(9, res) - - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -22, None) - self.assertEqual(8, res) - - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -1, None) - self.assertEqual(9, res) - - def test_calculate_desired_percentage_with_min_step(self): - # CHANGE_IN_PERCENTAGE, with min_step 0 - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 10, 0) - self.assertEqual(11, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -10, 0) - self.assertEqual(9, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 1, 0) - self.assertEqual(11, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -1, 0) - self.assertEqual(9, res) - - # CHANGE_IN_PERCENTAGE, with min_step 1 - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 10, 1) - self.assertEqual(11, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -10, 1) - self.assertEqual(9, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 1, 1) - self.assertEqual(11, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -1, 1) - self.assertEqual(9, res) - - # CHANGE_IN_PERCENTAGE, with min_step 2 - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 10, 2) - self.assertEqual(12, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -10, 2) - self.assertEqual(8, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, 1, 2) - self.assertEqual(12, res) - res = su.calculate_desired(10, consts.CHANGE_IN_PERCENTAGE, -1, 2) - self.assertEqual(8, res) - - def test_truncate_desired(self): - cluster = mock.Mock() - cluster.min_size = 10 - cluster.max_size = 50 - - # No constraints - for desired in [10, 11, 12, 49, 50]: - actual = su.truncate_desired(cluster, desired, None, None) - self.assertEqual(desired, actual) - - # min_size specified - actual = su.truncate_desired(cluster, 10, 20, None) - self.assertEqual(20, actual) - - # min_size None - actual = 
su.truncate_desired(cluster, 5, None, None) - self.assertEqual(10, actual) - - # max_size specified - actual = su.truncate_desired(cluster, 20, None, -1) - self.assertEqual(20, actual) - - actual = su.truncate_desired(cluster, 15, None, 30) - self.assertEqual(15, actual) - - actual = su.truncate_desired(cluster, 40, None, 30) - self.assertEqual(30, actual) - - # max_size not specified - actual = su.truncate_desired(cluster, 40, None, None) - self.assertEqual(40, actual) - - actual = su.truncate_desired(cluster, 60, None, None) - self.assertEqual(50, actual) - - def test_parse_resize_params_deletion(self): - action = mock.Mock() - cluster = mock.Mock() - action.inputs = { - consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY, - consts.ADJUSTMENT_NUMBER: 4, - consts.ADJUSTMENT_MIN_SIZE: 3, - consts.ADJUSTMENT_MAX_SIZE: 10, - consts.ADJUSTMENT_MIN_STEP: None, - consts.ADJUSTMENT_STRICT: True, - } - action.data = {} - action.RES_OK = 'OK' - - result, reason = su.parse_resize_params(action, cluster, 6) - - self.assertEqual('OK', result) - self.assertEqual('', reason) - self.assertEqual({'deletion': {'count': 2}}, action.data) - - def test_parse_resize_params_creation(self): - action = mock.Mock(RES_OK='OK') - cluster = mock.Mock() - action.inputs = { - consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY, - consts.ADJUSTMENT_NUMBER: 9, - consts.ADJUSTMENT_MIN_SIZE: 3, - consts.ADJUSTMENT_MAX_SIZE: 10, - consts.ADJUSTMENT_MIN_STEP: None, - consts.ADJUSTMENT_STRICT: True, - } - action.data = {} - - result, reason = su.parse_resize_params(action, cluster, 6) - - self.assertEqual('OK', result) - self.assertEqual('', reason) - self.assertEqual({'creation': {'count': 3}}, action.data) - - def test_parse_resize_params_invalid(self): - action = mock.Mock() - cluster = mock.Mock() - action.inputs = { - consts.ADJUSTMENT_TYPE: consts.EXACT_CAPACITY, - consts.ADJUSTMENT_NUMBER: 11, - consts.ADJUSTMENT_MIN_SIZE: 3, - consts.ADJUSTMENT_MAX_SIZE: 10, - consts.ADJUSTMENT_MIN_STEP: None, - consts.ADJUSTMENT_STRICT: True, - } - action.data = {} - action.RES_ERROR = 'ERROR' - - result, reason = su.parse_resize_params(action, cluster, 6) - - self.assertEqual('ERROR', result) - msg = _('The target capacity (11) is greater than ' - 'the specified max_size (10).') - self.assertEqual(msg, reason) - - def test_filter_error_nodes(self): - nodes = [ - mock.Mock(id='N1', status='ACTIVE', tainted=None), - mock.Mock(id='N2', tainted=None), - mock.Mock(id='N3', status='ACTIVE', tainted=None), - mock.Mock(id='N4', status='ERROR'), - mock.Mock(id='N5', status='ACTIVE', tainted=None), - mock.Mock(id='N6', status='WARNING'), - mock.Mock(id='N7', tainted=True), - mock.Mock(id='N8', status='ERROR'), - mock.Mock(id='N9', created_at=None), - mock.Mock(id='N10', tainted=False), - ] - res = su.filter_error_nodes(nodes) - self.assertIn('N4', res[0]) - self.assertIn('N6', res[0]) - self.assertIn('N7', res[0]) - self.assertIn('N8', res[0]) - self.assertIn('N9', res[0]) - self.assertEqual(5, len(res[1])) - - @mock.patch.object(su, 'filter_error_nodes') - def test_nodes_by_random(self, mock_filter): - good_nodes = [ - mock.Mock(id='N11', created_at=110), - mock.Mock(id='N15', created_at=150), - mock.Mock(id='N12', created_at=120), - mock.Mock(id='N13', created_at=130), - mock.Mock(id='N14', created_at=None), - ] - mock_filter.return_value = (['N1', 'N2'], good_nodes) - - nodes = mock.Mock() - - res = su.nodes_by_random(nodes, 1) - self.assertEqual(['N1'], res) - - res = su.nodes_by_random(nodes, 2) - self.assertEqual(['N1', 'N2'], res) - - res = 
su.nodes_by_random(nodes, 5) - self.assertIn('N1', res) - self.assertIn('N2', res) - self.assertEqual(5, len(res)) - - @mock.patch.object(su, 'filter_error_nodes') - def test_nodes_by_age_oldest(self, mock_filter): - good_nodes = [ - mock.Mock(id='N11', created_at=110), - mock.Mock(id='N15', created_at=150), - mock.Mock(id='N12', created_at=120), - mock.Mock(id='N13', created_at=130), - mock.Mock(id='N14', created_at=100), - ] - mock_filter.return_value = (['N1', 'N2'], good_nodes) - - nodes = mock.Mock() - - res = su.nodes_by_age(nodes, 1, True) - self.assertEqual(['N1'], res) - - res = su.nodes_by_age(nodes, 2, True) - self.assertEqual(['N1', 'N2'], res) - - res = su.nodes_by_age(nodes, 5, True) - self.assertEqual(['N1', 'N2', 'N14', 'N11', 'N12'], res) - - @mock.patch.object(su, 'filter_error_nodes') - def test_nodes_by_age_youngest(self, mock_filter): - good_nodes = [ - mock.Mock(id='N11', created_at=110), - mock.Mock(id='N15', created_at=150), - mock.Mock(id='N12', created_at=120), - mock.Mock(id='N13', created_at=130), - mock.Mock(id='N14', created_at=100), - ] - mock_filter.return_value = (['N1', 'N2'], good_nodes) - - nodes = mock.Mock() - - res = su.nodes_by_age(nodes, 1, False) - self.assertEqual(['N1'], res) - - res = su.nodes_by_age(nodes, 2, False) - self.assertEqual(['N1', 'N2'], res) - - res = su.nodes_by_age(nodes, 5, False) - self.assertEqual(['N1', 'N2', 'N15', 'N13', 'N12'], res) - - @mock.patch.object(su, 'filter_error_nodes') - def test_victims_by_profile_age_oldest(self, mock_filter): - good_nodes = [ - mock.Mock(id='N11', profile_created_at=110), - mock.Mock(id='N15', profile_created_at=150), - mock.Mock(id='N12', profile_created_at=120), - mock.Mock(id='N13', profile_created_at=130), - mock.Mock(id='N14', profile_created_at=140), - ] - mock_filter.return_value = (['N1', 'N2'], good_nodes) - - nodes = mock.Mock() - - res = su.nodes_by_profile_age(nodes, 1) - self.assertEqual(['N1'], res) - - res = su.nodes_by_profile_age(nodes, 2) - self.assertEqual(['N1', 'N2'], res) - - res = su.nodes_by_profile_age(nodes, 5) - self.assertEqual(['N1', 'N2', 'N11', 'N12', 'N13'], res) - - -class CheckSizeParamsTest(base.SenlinTestCase): - - scenarios = [ - ('10_15_x_x', dict( - desired=10, min_size=15, max_size=None, strict=True, - result='The target capacity (10) is less than the specified ' - 'min_size (15).')), - ('5_x10_x_x', dict( - desired=5, min_size=None, max_size=None, strict=True, - result='The target capacity (5) is less than the cluster\'s ' - 'min_size (10).')), - ('30_x_25_x', dict( - desired=30, min_size=None, max_size=25, strict=True, - result='The target capacity (30) is greater than the specified ' - 'max_size (25).')), - ('30_x_x20_x', dict( - desired=30, min_size=None, max_size=None, strict=True, - result='The target capacity (30) is greater than the cluster\'s ' - 'max_size (20).')), - ('x_25_x20_x', dict( - desired=None, min_size=25, max_size=None, strict=True, - result='The specified min_size (25) is greater than the current ' - 'max_size (20) of the cluster.')), - ('x_20_x_x', dict( - desired=None, min_size=20, max_size=None, strict=True, - result='The specified min_size (20) is greater than the current ' - 'desired_capacity (15) of the cluster.')), - ('x_x_5_x', dict( - desired=None, min_size=None, max_size=5, strict=True, - result='The specified max_size (5) is less than the current ' - 'min_size (10) of the cluster.')), - ('x_x_14_x', dict( - desired=None, min_size=None, max_size=14, strict=True, - result='The specified max_size (14) is less than the 
current ' - 'desired_capacity (15) of the cluster.')), - ('101_x_x_x', dict( - desired=101, min_size=None, max_size=None, strict=True, - result='The target capacity (101) is greater than the ' - 'maximum number of nodes allowed per cluster (100).')), - ('x_x_101_x', dict( - desired=None, min_size=None, max_size=101, strict=True, - result='The specified max_size (101) is greater than the ' - 'maximum number of nodes allowed per cluster (100).')), - # The following are okay cases - ('5_x10_x_x', dict( - desired=5, min_size=None, max_size=None, strict=False, - result=None)), - ('30_x_x20_x', dict( - desired=30, min_size=None, max_size=None, strict=False, - result=None)), - ('x_20_x_x', dict( - desired=None, min_size=20, max_size=None, strict=False, - result=None)), - ('x_x_14_x', dict( - desired=None, min_size=None, max_size=14, strict=False, - result=None)), - ('x_x_x_x', dict( - desired=None, min_size=None, max_size=None, strict=True, - result=None)), - ('18_x_x_x', dict( - desired=18, min_size=None, max_size=None, strict=True, - result=None)), - ('30_x_40_x', dict( - desired=30, min_size=None, max_size=40, strict=True, - result=None)), - ('x_x_40_x', dict( - desired=None, min_size=None, max_size=40, strict=True, - result=None)), - ('x_5_x_x', dict( - desired=None, min_size=5, max_size=None, strict=True, - result=None)), - ('x_15_x_x', dict( - desired=None, min_size=15, max_size=None, strict=True, - result=None)), - ('5_5_x_x', dict( - desired=5, min_size=5, max_size=None, strict=True, - result=None)), - ('20_x_x_x', dict( - desired=20, min_size=None, max_size=None, strict=True, - result=None)), - ('30_x_30_x', dict( - desired=30, min_size=None, max_size=30, strict=True, - result=None)), - ('30_x_-1_x', dict( - desired=30, min_size=None, max_size=-1, strict=True, - result=None)), - ('40_30_-1_x', dict( - desired=40, min_size=30, max_size=-1, strict=True, - result=None)), - ('x_x_-1_x', dict( - desired=None, min_size=None, max_size=-1, strict=True, - result=None)), - ] - - def setUp(self): - super(CheckSizeParamsTest, self).setUp() - cfg.CONF.set_override('max_nodes_per_cluster', 100) - - def test_check_size_params(self): - cluster = mock.Mock() - cluster.min_size = 10 - cluster.max_size = 20 - cluster.desired_capacity = 15 - - actual = su.check_size_params(cluster, self.desired, self.min_size, - self.max_size, self.strict) - self.assertEqual(self.result, actual) - - def test_check_size_params_default_strict(self): - cluster = mock.Mock() - cluster.min_size = 10 - cluster.max_size = 20 - cluster.desired_capacity = 15 - desired = 5 - min_size = None - max_size = None - - actual = su.check_size_params(cluster, desired, min_size, max_size) - self.assertIsNone(actual) diff --git a/senlin/tests/unit/test_common_schema.py b/senlin/tests/unit/test_common_schema.py deleted file mode 100644 index 5fdb29b29..000000000 --- a/senlin/tests/unit/test_common_schema.py +++ /dev/null @@ -1,956 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -from unittest import mock - - -from senlin.common import constraints -from senlin.common import exception as exc -from senlin.common import schema -from senlin.tests.unit.common import base - - -class FakeSchema(schema.SchemaBase): - - def __getitem__(self, key): - if key == self.TYPE: - return self.STRING - return super(FakeSchema, self).__getitem__(key) - - def resolve(self, value): - return str(value) - - def validate(self, value, context=None): - return - - -class TestAnyIndexDict(base.SenlinTestCase): - - def test_basic(self): - sot = schema.AnyIndexDict('*') - - self.assertIsInstance(sot, collections.abc.Mapping) - - self.assertEqual('*', sot.value) - self.assertEqual('*', sot[1]) - self.assertEqual('*', sot[2]) - self.assertEqual('*', sot['*']) - - for a in sot: - self.assertEqual('*', a) - - self.assertEqual(1, len(sot)) - - def test_bad_index(self): - sot = schema.AnyIndexDict('*') - - ex = self.assertRaises(KeyError, sot.__getitem__, 'foo') - - # the following test is not interesting - self.assertEqual("'Invalid key foo'", str(ex)) - - -class TestSchemaBase(base.SenlinTestCase): - - def test_basic(self): - sot = FakeSchema(description='desc', default='default', required=True, - schema=None, constraints=None, min_version='1.0', - max_version='2.0') - self.assertEqual('desc', sot.description) - self.assertEqual('default', sot.default) - self.assertTrue(sot.required) - self.assertIsNone(sot.schema) - self.assertEqual([], sot.constraints) - self.assertEqual('1.0', sot.min_version) - self.assertEqual('2.0', sot.max_version) - self.assertTrue(sot.has_default()) - - def test_init_schema_invalid(self): - ex = self.assertRaises(exc.ESchema, FakeSchema, schema=mock.Mock()) - self.assertEqual('Schema valid only for List or Map, not String', - str(ex)) - - def test_get_default(self): - sot = FakeSchema(default='DEFAULT') - mock_resolve = self.patchobject(sot, 'resolve', return_value='VVV') - - res = sot.get_default() - - self.assertEqual('VVV', res) - mock_resolve.assert_called_once_with('DEFAULT') - - def test_validate_default(self): - sot = FakeSchema() - - self.assertIsNone(sot._validate_default(mock.Mock())) - - def test_validate_default_with_value(self): - sot = FakeSchema(default='DEFAULT') - mock_validate = self.patchobject(sot, 'validate', return_value=None) - fake_context = mock.Mock() - - res = sot._validate_default(fake_context) - - self.assertIsNone(res) - mock_validate.assert_called_once_with('DEFAULT', fake_context) - - def test_validate_default_with_value_but_failed(self): - sot = FakeSchema(default='DEFAULT') - mock_validate = self.patchobject(sot, 'validate', - side_effect=ValueError('boom')) - fake_context = mock.Mock() - - ex = self.assertRaises(exc.ESchema, - sot._validate_default, - fake_context) - - mock_validate.assert_called_once_with('DEFAULT', fake_context) - self.assertEqual('Invalid default DEFAULT: boom', str(ex)) - - def test_validate_constraints(self): - c1 = mock.Mock() - c2 = mock.Mock() - sot = FakeSchema(constraints=[c1, c2]) - ctx = mock.Mock() - - res = sot.validate_constraints('VALUE', context=ctx) - - self.assertIsNone(res) - c1.validate.assert_called_once_with('VALUE', schema=None, context=ctx) - c2.validate.assert_called_once_with('VALUE', schema=None, context=ctx) - - def test_validate_constraints_failed(self): - c1 = mock.Mock() - c1.validate.side_effect = ValueError('BOOM') - sot = FakeSchema(constraints=[c1]) - ctx = mock.Mock() - - ex = self.assertRaises(exc.ESchema, - sot.validate_constraints, - 'FOO', context=ctx) - - 
c1.validate.assert_called_once_with('FOO', schema=None, context=ctx) - self.assertEqual('BOOM', str(ex)) - - def test_validate_version(self): - sot = FakeSchema(min_version='1.0', max_version='2.0') - - res = sot._validate_version('field', '1.0') - self.assertIsNone(res) - - res = sot._validate_version('field', '1.1') - self.assertIsNone(res) - - # there is a warning, but validation passes - res = sot._validate_version('field', '2.0') - self.assertIsNone(res) - - ex = self.assertRaises(exc.ESchema, - sot._validate_version, - 'field', '0.9') - self.assertEqual('field (min_version=1.0) is not supported by ' - 'spec version 0.9.', - str(ex)) - - ex = self.assertRaises(exc.ESchema, - sot._validate_version, - 'field', '2.1') - self.assertEqual('field (max_version=2.0) is not supported by ' - 'spec version 2.1.', - str(ex)) - - def test_validate_version_no_min_version(self): - sot = FakeSchema(max_version='2.0') - - res = sot._validate_version('field', '1.0') - self.assertIsNone(res) - - res = sot._validate_version('field', '2.0') - self.assertIsNone(res) - - ex = self.assertRaises(exc.ESchema, - sot._validate_version, - 'field', '2.1') - self.assertEqual('field (max_version=2.0) is not supported by ' - 'spec version 2.1.', - str(ex)) - - def test_validate_version_no_max_version(self): - sot = FakeSchema(min_version='1.0') - - res = sot._validate_version('field', '1.0') - self.assertIsNone(res) - - res = sot._validate_version('field', '2.3') - self.assertIsNone(res) - - ex = self.assertRaises(exc.ESchema, - sot._validate_version, - 'field', '0.5') - self.assertEqual('field (min_version=1.0) is not supported by ' - 'spec version 0.5.', - str(ex)) - - def test_validate_version_no_version_restriction(self): - sot = FakeSchema() - - res = sot._validate_version('field', '1.0') - self.assertIsNone(res) - - res = sot._validate_version('field', '2.3') - self.assertIsNone(res) - - def test__getitem__(self): - sot = FakeSchema(description='desc', default='default', required=False, - constraints=[{'foo': 'bar'}]) - - self.assertEqual('desc', sot['description']) - self.assertEqual('default', sot['default']) - self.assertEqual(False, sot['required']) - self.assertEqual([{'foo': 'bar'}], sot['constraints']) - self.assertRaises(KeyError, sot.__getitem__, 'bogus') - - sot = schema.List(schema=schema.String()) - self.assertEqual( - { - '*': { - 'required': False, - 'type': 'String', - 'updatable': False - } - }, - sot['schema']) - - def test__iter__(self): - sot = FakeSchema(description='desc', default='default', required=False, - constraints=[{'foo': 'bar'}]) - - res = list(iter(sot)) - - self.assertIn('type', res) - self.assertIn('description', res) - self.assertIn('default', res) - self.assertIn('required', res) - self.assertIn('constraints', res) - - def test__len__(self): - sot = FakeSchema() - - res = list(iter(sot)) - - self.assertIn('type', res) - self.assertIn('required', res) - self.assertEqual(2, len(sot)) - - -class TestPropertySchema(base.SenlinTestCase): - - def setUp(self): - super(TestPropertySchema, self).setUp() - - class TestProperty(schema.PropertySchema): - - def __getitem__(self, key): - if key == self.TYPE: - return 'TEST' - return super(TestProperty, self).__getitem__(key) - - self.cls = TestProperty - - def test_basic(self): - sot = self.cls() - - self.assertIsNone(sot.description) - self.assertIsNone(sot.default) - self.assertFalse(sot.required) - self.assertIsNone(sot.schema) - self.assertEqual([], sot.constraints) - self.assertIsNone(sot.min_version) - 
self.assertIsNone(sot.max_version) - self.assertFalse(sot.updatable) - - def test__getitem__(self): - sot = self.cls(updatable=True) - - res = sot['updatable'] - - self.assertTrue(res) - self.assertTrue(sot.updatable) - - -class TestBoolean(base.SenlinTestCase): - - def test_basic(self): - sot = schema.Boolean('desc') - - self.assertEqual('Boolean', sot['type']) - self.assertEqual('desc', sot['description']) - - def test_to_schema_type(self): - sot = schema.Boolean('desc') - - res = sot.to_schema_type(True) - self.assertTrue(res) - - res = sot.to_schema_type('true') - self.assertTrue(res) - - res = sot.to_schema_type('trUE') - self.assertTrue(res) - - res = sot.to_schema_type('False') - self.assertFalse(res) - - res = sot.to_schema_type('FALSE') - self.assertFalse(res) - - ex = self.assertRaises(exc.ESchema, sot.to_schema_type, 'bogus') - self.assertEqual("The value 'bogus' is not a valid Boolean", - str(ex)) - - def test_resolve(self): - sot = schema.Boolean() - - res = sot.resolve(True) - self.assertTrue(res) - - res = sot.resolve(False) - self.assertFalse(res) - - res = sot.resolve('Yes') - self.assertTrue(res) - - def test_validate(self): - sot = schema.Boolean() - - res = sot.validate(True) - self.assertIsNone(res) - - res = sot.validate('No') - self.assertIsNone(res) - - ex = self.assertRaises(exc.ESchema, sot.validate, 'bogus') - self.assertEqual("The value 'bogus' is not a valid Boolean", - str(ex)) - - -class TestInteger(base.SenlinTestCase): - - def test_basic(self): - sot = schema.Integer('desc') - - self.assertEqual('Integer', sot['type']) - self.assertEqual('desc', sot['description']) - - def test_to_schema_type(self): - sot = schema.Integer('desc') - - res = sot.to_schema_type(123) - self.assertEqual(123, res) - - res = sot.to_schema_type('123') - self.assertEqual(123, res) - - res = sot.to_schema_type(False) - self.assertEqual(0, res) - - self.assertIsNone(sot.to_schema_type(None)) - - ex = self.assertRaises(exc.ESchema, sot.to_schema_type, '456L') - self.assertEqual("The value '456L' is not a valid Integer", - str(ex)) - - def test_resolve(self): - sot = schema.Integer() - - res = sot.resolve(1) - self.assertEqual(1, res) - - res = sot.resolve(True) - self.assertEqual(1, res) - - res = sot.resolve(False) - self.assertEqual(0, res) - - self.assertIsNone(sot.resolve(None)) - - ex = self.assertRaises(exc.ESchema, sot.resolve, '456L') - self.assertEqual("The value '456L' is not a valid Integer", - str(ex)) - - def test_validate(self): - sot = schema.Integer() - - res = sot.validate(1) - self.assertIsNone(res) - - res = sot.validate('1') - self.assertIsNone(res) - - res = sot.validate(True) - self.assertIsNone(res) - - mock_constraints = self.patchobject(sot, 'validate_constraints', - return_value=None) - - res = sot.validate(1) - self.assertIsNone(res) - mock_constraints.assert_called_once_with(1, schema=sot, context=None) - ex = self.assertRaises(exc.ESchema, sot.validate, 'bogus') - self.assertEqual("The value 'bogus' is not a valid Integer", - str(ex)) - - -class TestString(base.SenlinTestCase): - - def test_basic(self): - sot = schema.String('desc') - - self.assertEqual('String', sot['type']) - self.assertEqual('desc', sot['description']) - - def test_invalid_constructor(self): - self.assertRaises(exc.ESchema, - schema.String, - schema=schema.String('String')) - - def test_to_schema_type(self): - sot = schema.String('desc') - - res = sot.to_schema_type(123) - self.assertEqual('123', res) - - res = sot.to_schema_type('123') - self.assertEqual('123', res) - - res = 
sot.to_schema_type(False) - self.assertEqual('False', res) - - res = sot.to_schema_type(None) - self.assertIsNone(res) - - res = sot.to_schema_type(u'\u4e2d\u6587') - self.assertEqual(u'\u4e2d\u6587', res) - - def test_resolve(self): - sot = schema.String() - - res = sot.resolve(1) - self.assertEqual('1', res) - - res = sot.resolve(True) - self.assertEqual('True', res) - - res = sot.resolve(None) - self.assertIsNone(res) - - def test_validate(self): - sot = schema.String() - - res = sot.validate('1') - self.assertIsNone(res) - - res = sot.validate(u'unicode') - self.assertIsNone(res) - - mock_constraints = self.patchobject(sot, 'validate_constraints', - return_value=None) - - res = sot.validate("abcd") - self.assertIsNone(res) - mock_constraints.assert_called_once_with( - "abcd", schema=sot, context=None) - - -class TestNumber(base.SenlinTestCase): - - def test_basic(self): - sot = schema.Number('desc') - - self.assertEqual('Number', sot['type']) - self.assertEqual('desc', sot['description']) - - def test_to_schema_type(self): - sot = schema.Number('desc') - - res = sot.to_schema_type(123) - self.assertEqual(123, res) - - res = sot.to_schema_type(123.34) - self.assertEqual(123.34, res) - - res = sot.to_schema_type(False) - self.assertEqual(False, res) - - def test_resolve(self): - sot = schema.Number() - mock_convert = self.patchobject(sot, 'to_schema_type') - - res = sot.resolve(1) - self.assertEqual(mock_convert.return_value, res) - mock_convert.assert_called_once_with(1) - - def test_validate(self): - sot = schema.Number() - - res = sot.validate(1) - self.assertIsNone(res) - - res = sot.validate('1') - self.assertIsNone(res) - - ex = self.assertRaises(exc.ESchema, sot.validate, "bogus") - self.assertEqual("The value 'bogus' is not a valid number.", - str(ex)) - - mock_constraints = self.patchobject(sot, 'validate_constraints', - return_value=None) - - res = sot.validate('1234') - self.assertIsNone(res) - mock_constraints.assert_called_once_with( - 1234, schema=sot, context=None) - - -class TestList(base.SenlinTestCase): - - def test_basic(self): - sot = schema.List('desc') - - self.assertEqual('List', sot['type']) - self.assertEqual('desc', sot['description']) - - def test_get_children(self): - sot = schema.List('desc', schema=schema.String()) - - res = sot._get_children(['v1', 'v2'], [0, 1]) - self.assertEqual(['v1', 'v2'], list(res)) - - def test_resolve(self): - sot = schema.List(schema=schema.String()) - - res = sot.resolve(['v1', 'v2']) - - self.assertEqual(['v1', 'v2'], res) - - self.assertRaises(TypeError, - sot.resolve, - 123) - - def test_validate(self): - sot = schema.List(schema=schema.String()) - - res = sot.validate(['abc', 'def']) - - self.assertIsNone(res) - - def test_validate_failed(self): - sot = schema.List(schema=schema.String()) - - ex = self.assertRaises(exc.ESchema, sot.validate, None) - self.assertEqual("'None' is not a List", str(ex)) - - -class TestMap(base.SenlinTestCase): - - def test_basic(self): - sot = schema.Map('desc') - - self.assertEqual('Map', sot['type']) - self.assertEqual('desc', sot['description']) - - def test_get_children(self): - sot = schema.Map('desc', schema={'foo': schema.String()}) - - res = sot._get_children({'foo': 'bar'}) - - self.assertEqual({'foo': 'bar'}, dict(res)) - - def test_get_default(self): - sot = schema.Map(schema={'foo': schema.String()}) - self.assertEqual({}, sot.get_default()) - - sot = schema.Map(default={'foo': 'bar'}, - schema={'foo': schema.String()}) - self.assertEqual({'foo': 'bar'}, sot.get_default()) - - sot = 
schema.Map(default='bad', schema={'foo': schema.String()}) - ex = self.assertRaises(exc.ESchema, sot.get_default) - self.assertEqual("'bad' is not a Map", str(ex)) - - def test_resolve(self): - sot = schema.Map(schema={'foo': schema.String()}) - - res = sot.resolve({"foo": "bar"}) - self.assertEqual({'foo': 'bar'}, res) - - res = sot.resolve('{"foo": "bar"}') - self.assertEqual({'foo': 'bar'}, res) - - ex = self.assertRaises(exc.ESchema, sot.resolve, 'plainstring') - self.assertEqual("'plainstring' is not a Map", str(ex)) - - def test_validate(self): - sot = schema.Map(schema={'foo': schema.String()}) - - res = sot.validate({"foo": "bar"}) - - self.assertIsNone(res) - - def test_validate_failed(self): - sot = schema.Map(schema={'foo': schema.String()}) - - ex = self.assertRaises(exc.ESchema, sot.validate, None) - self.assertEqual("'None' is not a Map", str(ex)) - - ex = self.assertRaises(exc.ESchema, sot.validate, 'bogus') - self.assertEqual("'bogus' is not a Map", str(ex)) - - -class TestStringParam(base.SenlinTestCase): - - def test_basic(self): - sot = schema.StringParam() - self.assertEqual('String', sot['type']) - self.assertEqual(False, sot['required']) - - def test_validate(self): - sot = schema.StringParam() - result = sot.validate('foo') - self.assertIsNone(result) - - def test_validate_bad_type(self): - sot = schema.StringParam() - self.assertRaises(TypeError, - sot.validate, - ['123']) - - def test_validate_failed_constraint(self): - sot = schema.StringParam( - constraints=[constraints.AllowedValues(('abc', 'def'))]) - - ex = self.assertRaises(exc.ESchema, sot.validate, '123') - - self.assertEqual("'123' must be one of the allowed values: abc, def", - str(ex)) - - -class TestIntegerParam(base.SenlinTestCase): - - def test_basic(self): - sot = schema.IntegerParam() - self.assertEqual('Integer', sot['type']) - self.assertEqual(False, sot['required']) - - def test_validate(self): - sot = schema.IntegerParam() - result = sot.validate(123) - self.assertIsNone(result) - - def test_validate_bad_type(self): - sot = schema.IntegerParam() - self.assertRaises(ValueError, - sot.validate, - 'not int') - - def test_validate_failed_constraint(self): - sot = schema.IntegerParam( - constraints=[constraints.AllowedValues((123, 124))]) - - ex = self.assertRaises(exc.ESchema, sot.validate, 12) - - self.assertEqual("'12' must be one of the allowed values: 123, 124", - str(ex)) - - -class TestOperation(base.SenlinTestCase): - - def test_basic(self): - sot = schema.Operation() - self.assertEqual('Undocumented', sot['description']) - self.assertEqual({}, sot['parameters']) - - def test_initialized(self): - sot = schema.Operation('des', schema={'foo': schema.StringParam()}) - self.assertEqual('des', sot['description']) - self.assertEqual({'foo': {'required': False, 'type': 'String'}}, - sot['parameters']) - - def test_validate(self): - sot = schema.Operation('des', schema={'foo': schema.StringParam()}) - res = sot.validate({'foo': 'bar'}) - self.assertIsNone(res) - - def test_validate_unrecognizable_param(self): - sot = schema.Operation('des', schema={'foo': schema.StringParam()}) - - ex = self.assertRaises(exc.ESchema, sot.validate, - {'baar': 'baar'}) - - self.assertEqual("Unrecognizable parameter 'baar'", str(ex)) - - def test_validate_failed_type(self): - sot = schema.Operation('des', schema={'foo': schema.StringParam()}) - - ex = self.assertRaises(exc.ESchema, sot.validate, - {'foo': ['baaar']}) - - self.assertEqual("value is not a string", - str(ex)) - - def test_validate_failed_constraint(self): 
- sot = schema.Operation( - 'des', - schema={ - 'foo': schema.StringParam( - constraints=[constraints.AllowedValues(['bar'])]) - } - ) - - ex = self.assertRaises(exc.ESchema, sot.validate, - {'foo': 'baaar'}) - - self.assertEqual("'baaar' must be one of the allowed values: bar", - str(ex)) - - def test_validate_failed_required(self): - sot = schema.Operation( - 'des', - schema={ - 'foo': schema.StringParam(), - 'bar': schema.StringParam(required=True) - } - ) - - ex = self.assertRaises(exc.ESchema, sot.validate, - {'foo': 'baaar'}) - - self.assertEqual("Required parameter 'bar' not provided", - str(ex)) - - def test_validate_failed_version(self): - sot = schema.Operation( - 'des', - schema={ - 'foo': schema.StringParam(min_version='2.0'), - } - ) - - ex = self.assertRaises(exc.ESchema, sot.validate, - {'foo': 'baaar'}, '1.0') - - self.assertEqual("foo (min_version=2.0) is not supported by spec " - "version 1.0.", str(ex)) - - -class TestSpec(base.SenlinTestCase): - spec_schema = { - 'key1': schema.String('first key', default='value1'), - 'key2': schema.Integer('second key', required=True), - } - - def test_init(self): - data = {'key1': 'value1', 'key2': 2} - sot = schema.Spec(self.spec_schema, data) - - self.assertEqual(self.spec_schema, sot._schema) - self.assertEqual(data, sot._data) - self.assertIsNone(sot._version) - - def test_init_with_version(self): - data = {'key1': 'value1', 'key2': 2} - sot = schema.Spec(self.spec_schema, data, version='1.2') - - self.assertEqual(self.spec_schema, sot._schema) - self.assertEqual(data, sot._data) - self.assertEqual('1.2', sot._version) - - def test_validate(self): - data = {'key1': 'value1', 'key2': 2} - sot = schema.Spec(self.spec_schema, data) - res = sot.validate() - self.assertIsNone(res) - - data1 = {'key2': 2} - sot = schema.Spec(self.spec_schema, data1) - res = sot.validate() - self.assertIsNone(res) - - def test_validate_fail_unrecognizable_key(self): - spec_schema = { - 'key1': schema.String('first key', default='value1'), - } - data = {'key1': 'value1', 'key2': 2} - sot = schema.Spec(spec_schema, data, version='1.0') - ex = self.assertRaises(exc.ESchema, sot.validate) - - self.assertIn("Unrecognizable spec item 'key2'", - str(ex.message)) - - def test_validate_fail_value_type_incorrect(self): - spec_schema = { - 'key1': schema.String('first key', default='value1'), - 'key2': schema.Integer('second key', required=True), - } - - data = {'key1': 'value1', 'key2': 'abc'} - spec = schema.Spec(spec_schema, data, version='1.0') - ex = self.assertRaises(exc.ESchema, spec.validate) - self.assertIn("The value 'abc' is not a valid Integer", - str(ex.message)) - - def test_validate_version_good(self): - spec_schema = { - 'type': schema.String('Type name', required=True), - 'version': schema.String('Version number', required=True), - 'key1': schema.String('first key', default='value1'), - 'key2': schema.Integer('second key', required=True, - min_version='1.0', max_version='1.2'), - } - - data = { - 'key1': 'value1', - 'key2': 2, - 'type': 'test-type', - 'version': '1.0' - } - spec = schema.Spec(spec_schema, data) - self.assertIsNone(spec.validate()) - - data = {'key2': 2, 'type': 'test-type', 'version': '1.2'} - spec = schema.Spec(spec_schema, data) - self.assertIsNone(spec.validate()) - - def test_validate_version_fail_unsupported_version(self): - spec_schema = { - 'type': schema.String('Type name', required=True), - 'version': schema.String('Version number', required=True), - 'key1': schema.String('first key', default='value1', - 
min_version='1.1'), - 'key2': schema.Integer('second key', required=True), - } - - data = { - 'key1': 'value1', - 'key2': 2, - 'type': 'test-type', - 'version': '1.0' - } - spec = schema.Spec(spec_schema, data, version='1.0') - ex = self.assertRaises(exc.ESchema, spec.validate) - msg = 'key1 (min_version=1.1) is not supported by spec version 1.0.' - self.assertIn(msg, str(ex.message)) - - def test_validate_version_fail_version_over_max(self): - spec_schema = { - 'type': schema.String('Type name', required=True), - 'version': schema.String('Version number', required=True), - 'key1': schema.String('first key', default='value1', - max_version='2.0'), - 'key2': schema.Integer('second key', required=True), - } - - data = { - 'key1': 'value1', - 'key2': 2, - 'type': 'test-type', - 'version': '3.0' - } - spec = schema.Spec(spec_schema, data, version='3.0') - ex = self.assertRaises(exc.ESchema, spec.validate) - msg = 'key1 (max_version=2.0) is not supported by spec version 3.0.' - self.assertIn(msg, str(ex.message)) - - def test_resolve_value(self): - data = {'key2': 2} - sot = schema.Spec(self.spec_schema, data, version='1.2') - - res = sot.resolve_value('key2') - self.assertEqual(2, res) - - res = sot.resolve_value('key1') - self.assertEqual('value1', res) - - ex = self.assertRaises(exc.ESchema, sot.resolve_value, 'key3') - self.assertEqual("Invalid spec item: key3", str(ex)) - - def test_resolve_value_required_key_missing(self): - data = {'key1': 'value1'} - sot = schema.Spec(self.spec_schema, data, version='1.0') - - ex = self.assertRaises(exc.ESchema, sot.resolve_value, 'key2') - self.assertIn("Required spec item 'key2' not provided", - str(ex.message)) - - def test__getitem__(self): - data = {'key2': 2} - sot = schema.Spec(self.spec_schema, data, version='1.2') - - res = sot['key1'] - self.assertEqual('value1', res) - res = sot['key2'] - self.assertEqual(2, res) - - def test__len__(self): - data = {'key2': 2} - sot = schema.Spec(self.spec_schema, data, version='1.2') - - res = len(sot) - self.assertEqual(2, res) - - def test__contains__(self): - data = {'key2': 2} - sot = schema.Spec(self.spec_schema, data, version='1.2') - - self.assertIn('key1', sot) - self.assertIn('key2', sot) - self.assertNotIn('key3', sot) - - def test__iter__(self): - data = {'key2': 2} - sot = schema.Spec(self.spec_schema, data, version='1.2') - - res = [k for k in iter(sot)] - - self.assertIn('key1', res) - self.assertIn('key2', res) - - -class TestSpecVersionChecking(base.SenlinTestCase): - - def test_spec_version_okay(self): - spec = {'type': 'Foo', 'version': 'version string'} - res = schema.get_spec_version(spec) - self.assertEqual(('Foo', 'version string'), res) - - spec = {'type': 'Foo', 'version': 1.5} - res = schema.get_spec_version(spec) - self.assertEqual(('Foo', '1.5'), res) - - def test_spec_version_not_dict(self): - spec = 'a string' - ex = self.assertRaises(exc.ESchema, schema.get_spec_version, spec) - self.assertEqual('The provided spec is not a map.', - str(ex)) - - def test_spec_version_no_type_key(self): - spec = {'tpye': 'a string'} - ex = self.assertRaises(exc.ESchema, schema.get_spec_version, spec) - self.assertEqual("The 'type' key is missing from the provided " - "spec map.", str(ex)) - - def test_spec_version_no_version_key(self): - spec = {'type': 'a string', 'ver': '123'} - ex = self.assertRaises(exc.ESchema, schema.get_spec_version, spec) - self.assertEqual("The 'version' key is missing from the provided " - "spec map.", str(ex)) diff --git a/senlin/tests/unit/test_common_utils.py 
b/senlin/tests/unit/test_common_utils.py deleted file mode 100644 index 492707b09..000000000 --- a/senlin/tests/unit/test_common_utils.py +++ /dev/null @@ -1,299 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import io -from unittest import mock -import urllib - -from oslo_log import log as logging -from oslo_utils import timeutils -import requests - -from oslo_config import cfg - -from senlin.common import exception -from senlin.common import utils -from senlin.objects import service as service_obj -from senlin.tests.unit.common import base - - -class TestGetPositiveInt(base.SenlinTestCase): - - def test_get_positive_int(self): - cases = {1: 1, 2: 2, '1': 1, '2': 2} - for value, expected in cases.items(): - res, actual = utils.get_positive_int(value) - self.assertTrue(res) - self.assertEqual(expected, actual) - - bad_values = ['foo', {}, [], -1, 1.5, 0.2, None] - for value in bad_values: - res, actual = utils.get_positive_int(value) - self.assertFalse(res) - self.assertEqual(0, actual) - - -class Response(object): - def __init__(self, buf=''): - self.buf = buf - - def iter_content(self, chunk_size=1): - while self.buf: - yield self.buf[:chunk_size] - self.buf = self.buf[chunk_size:] - - def raise_for_status(self): - pass - - -class UrlFetchTest(base.SenlinTestCase): - def test_file_scheme_default_behaviour(self): - self.assertRaises(utils.URLFetchError, - utils.url_fetch, 'file:///etc/profile') - - @mock.patch('urllib.request.urlopen') - def test_file_scheme_supported(self, mock_urlopen): - data = '{ "foo": "bar" }' - url = 'file:///etc/profile' - mock_urlopen.return_value = io.StringIO(data) - - actual = utils.url_fetch(url, allowed_schemes=['file']) - self.assertEqual(data, actual) - - @mock.patch('urllib.request.urlopen') - def test_file_scheme_failure(self, mock_urlopen): - url = 'file:///etc/profile' - mock_urlopen.side_effect = urllib.error.URLError('oops') - - self.assertRaises(utils.URLFetchError, - utils.url_fetch, url, allowed_schemes=['file']) - - def test_http_scheme(self): - url = 'http://example.com/somedata' - data = '{ "foo": "bar" }' - response = Response(data) - self.patchobject(requests, 'get', return_value=response) - self.assertEqual(data, utils.url_fetch(url)) - - def test_https_scheme(self): - url = 'https://example.com/somedata' - data = '{ "foo": "bar" }' - self.patchobject(requests, 'get', return_value=Response(data)) - self.assertEqual(data, utils.url_fetch(url)) - - def test_http_error(self): - url = 'http://example.com/somedata' - - self.patchobject(requests, 'get', - side_effect=requests.exceptions.HTTPError()) - self.assertRaises(utils.URLFetchError, utils.url_fetch, url) - - def test_non_exist_url(self): - url = 'http://non-exist.com/somedata' - - self.patchobject(requests, 'get', - side_effect=requests.exceptions.Timeout()) - self.assertRaises(utils.URLFetchError, utils.url_fetch, url) - - def test_garbage(self): - self.assertRaises(utils.URLFetchError, utils.url_fetch, 'wibble') - - def test_max_fetch_size_okay(self): - url = 
'http://example.com/somedata' - data = '{ "foo": "bar" }' - cfg.CONF.set_override('max_response_size', 500) - self.patchobject(requests, 'get', return_value=Response(data)) - utils.url_fetch(url) - - def test_max_fetch_size_error(self): - url = 'http://example.com/somedata' - data = '{ "foo": "bar" }' - cfg.CONF.set_override('max_response_size', 5) - self.patchobject(requests, 'get', return_value=Response(data)) - exception = self.assertRaises(utils.URLFetchError, - utils.url_fetch, url) - self.assertIn("Data exceeds", str(exception)) - - @mock.patch.object(requests, 'get') - def test_string_response(self, mock_get): - url = 'http://example.com/somedata' - data = '{ "foo": "bar" }' - - mock_resp = mock.Mock() - mock_resp.iter_content.return_value = [data] - mock_get.return_value = mock_resp - - self.assertEqual(data, utils.url_fetch(url)) - - @mock.patch.object(requests, 'get') - def test_byte_response(self, mock_get): - url = 'http://example.com/somedata' - data = b'{ "foo": "bar" }' - - mock_resp = mock.Mock() - mock_resp.iter_content.return_value = [data] - mock_get.return_value = mock_resp - - self.assertEqual('{ "foo": "bar" }', utils.url_fetch(url)) - - -class TestRandomName(base.SenlinTestCase): - - def test_default(self): - result = utils.random_name() - self.assertIsNotNone(result) - self.assertEqual(8, len(result)) - - result1 = utils.random_name() - self.assertIsNotNone(result1) - self.assertEqual(8, len(result1)) - - self.assertNotEqual(result, result1) - - def test_with_length(self): - result = utils.random_name(12) - self.assertIsNotNone(result) - self.assertEqual(12, len(result)) - - result1 = utils.random_name(12) - self.assertIsNotNone(result1) - self.assertEqual(12, len(result1)) - - self.assertNotEqual(result, result1) - - def test_with_bad_length(self): - result = utils.random_name(0) - self.assertEqual('', result) - - result = utils.random_name(-9) - self.assertEqual('', result) - - -class TestFormatNodeName(base.SenlinTestCase): - - def test_empty(self): - res = utils.format_node_name(None, None, 0) - self.assertIsNotNone(res) - self.assertEqual(13, len(res)) - - res = utils.format_node_name("", None, 0) - self.assertIsNotNone(res) - self.assertEqual(13, len(res)) - - def test_has_random(self): - res = utils.format_node_name("prefix-$R", None, 0) - self.assertEqual(15, len(res)) - - res = utils.format_node_name("prefix-$5R", None, 0) - self.assertEqual(12, len(res)) - - def test_has_index(self): - res = utils.format_node_name("prefix-$I", None, 12) - self.assertEqual(9, len(res)) - - res = utils.format_node_name("prefix-$5I", None, 12) - self.assertEqual(12, len(res)) - - def test_has_both(self): - res = utils.format_node_name("prefix-$3R-$I", None, 12) - self.assertEqual(13, len(res)) - - res = utils.format_node_name("$3R-prefix-$5I", None, 12) - self.assertEqual(16, len(res)) - - -class TestParseLevelValues(base.SenlinTestCase): - - def test_none(self): - res = utils.parse_level_values(None) - self.assertIsNone(res) - - def test_empty_list(self): - res = utils.parse_level_values([]) - self.assertIsNone(res) - - def test_single_value(self): - res = utils.parse_level_values('ERROR') - self.assertEqual([logging.ERROR], res) - - def test_multi_values(self): - res = utils.parse_level_values(['WARN', 'ERROR']) - self.assertEqual([logging.WARNING, logging.ERROR], res) - - def test_with_invalid_values(self): - res = utils.parse_level_values(['warn', 'ERROR']) - self.assertEqual([logging.ERROR], res) - - def test_with_integers(self): - res = utils.parse_level_values(40) - 
self.assertEqual([40], res) - - def test_with_only_invalid_values(self): - res = utils.parse_level_values(['warn']) - self.assertIsNone(res) - - -class TestGetPathParser(base.SenlinTestCase): - - def test_normal(self): - res = utils.get_path_parser('foo.bar') - self.assertIsNotNone(res) - - def test_bad_path(self): - err = self.assertRaises(exception.BadRequest, - utils.get_path_parser, - '^foo.bar') - self.assertEqual("Invalid attribute path - Unexpected " - "character: ^.", str(err)) - - -class EngineDeathTest(base.SenlinTestCase): - - def setUp(self): - super(EngineDeathTest, self).setUp() - self.ctx = mock.Mock() - - @mock.patch.object(service_obj.Service, 'get') - def test_engine_is_none(self, mock_service): - mock_service.return_value = None - self.assertTrue(utils.is_service_dead(self.ctx, 'fake_engine_id')) - mock_service.assert_called_once_with(self.ctx, 'fake_engine_id') - - @mock.patch.object(service_obj.Service, 'get') - def test_engine_is_dead(self, mock_service): - delta = datetime.timedelta(seconds=3 * cfg.CONF.periodic_interval) - update_time = timeutils.utcnow(True) - delta - mock_service.return_value = mock.Mock(updated_at=update_time) - - res = utils.is_service_dead(self.ctx, 'fake_engine_id') - - self.assertTrue(res) - mock_service.assert_called_once_with(self.ctx, 'fake_engine_id') - - @mock.patch.object(service_obj.Service, 'get') - def test_engine_is_alive(self, mock_svc): - mock_svc.return_value = mock.Mock(updated_at=timeutils.utcnow(True)) - - res = utils.is_service_dead(self.ctx, 'fake_engine_id') - - self.assertFalse(res) - mock_svc.assert_called_once_with(self.ctx, 'fake_engine_id') - - @mock.patch.object(service_obj.Service, 'get') - def test_use_specified_duration(self, mock_svc): - mock_svc.return_value = mock.Mock(updated_at=timeutils.utcnow(True)) - - res = utils.is_service_dead(self.ctx, 'fake_engine_id', 10000) - - self.assertFalse(res) - mock_svc.assert_called_once_with(self.ctx, 'fake_engine_id') diff --git a/senlin/tests/unit/test_conf.py b/senlin/tests/unit/test_conf.py deleted file mode 100644 index e46a9b32e..000000000 --- a/senlin/tests/unit/test_conf.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
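For context, the ``EngineDeathTest`` cases above pin down the liveness contract of ``utils.is_service_dead``: a missing service record counts as dead, a record whose ``updated_at`` heartbeat is older than a grace period derived from ``CONF.periodic_interval`` counts as dead, and a caller-supplied ``duration`` overrides that default. A minimal sketch consistent with those tests follows; the factor of 2 on the periodic interval is an assumption for illustration, not Senlin's actual constant::

    import datetime

    from oslo_config import cfg
    from oslo_utils import timeutils

    from senlin.objects import service as service_obj


    def is_service_dead(ctx, service_id, duration=None):
        """Guess whether a service is dead from its DB heartbeat."""
        svc = service_obj.Service.get(ctx, service_id)
        if svc is None:
            # No record at all: treat the engine as dead.
            return True
        if duration is None:
            # Grace period tied to the periodic task interval
            # (the multiplier of 2 is an assumed value).
            duration = 2 * cfg.CONF.periodic_interval
        limit = timeutils.utcnow(True) - datetime.timedelta(seconds=duration)
        return svc.updated_at < limit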
-from unittest import mock - -import oslotest.base - -from senlin.conf import engine -from senlin.conf import opts - - -class TestConfOpts(oslotest.base.BaseTestCase): - def setUp(self): - super(TestConfOpts, self).setUp() - - def test_opts_tupleize(self): - self.assertEqual([('a', 'b')], opts._tupleize({'a': 'b'})) - - def test_opts_list(self): - self.assertIsInstance(opts.list_opts(), list) - - @mock.patch('pkgutil.iter_modules') - def test_opts_list_module_names(self, mock_iter_modules): - mock_iter_modules.return_value = iter( - [ - (None, 'api', False), - (None, 'authentication', False), - (None, 'unknown', True), - ] - ) - - self.assertEqual(['api', 'authentication'], opts._list_module_names()) - - def test_opts_import_modules(self): - self.assertEqual([engine], opts._import_modules(['engine'])) - - @mock.patch('importlib.import_module') - def test_opts_import_invalid_module(self, mock_import_module): - mock_import_module.return_value = None - - self.assertRaisesRegex( - Exception, - "The module 'senlin.conf.invalid' should have a 'list_opts' " - "function which returns the config options.", - opts._import_modules, ['invalid'] - ) diff --git a/senlin/tests/unit/test_hacking.py b/senlin/tests/unit/test_hacking.py deleted file mode 100644 index d6c35c4f5..000000000 --- a/senlin/tests/unit/test_hacking.py +++ /dev/null @@ -1,127 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
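The ``TestConfOpts`` cases above exercise a standard oslo.config pattern: each module under ``senlin.conf`` exposes a ``list_opts`` callable, and an aggregator module discovers and imports them so that oslo-config-generator can render a sample configuration file. A rough sketch of helpers matching the tested behaviour (simplified; the discovery path is an assumption)::

    import importlib
    import pkgutil


    def _tupleize(dct):
        """Convert a {group: opts} dict to the [(group, opts)] list form."""
        return [(key, val) for key, val in dct.items()]


    def _list_module_names():
        names = []
        # Keep plain modules; skip sub-packages (ispkg is True).
        for _, name, ispkg in pkgutil.iter_modules(path=['senlin/conf']):
            if not ispkg:
                names.append(name)
        return names


    def _import_modules(names):
        modules = []
        for name in names:
            mod = importlib.import_module('senlin.conf.' + name)
            if not hasattr(mod, 'list_opts'):
                raise Exception(
                    "The module 'senlin.conf.%s' should have a 'list_opts' "
                    "function which returns the config options." % name)
            modules.append(mod)
        return modules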
- -import pycodestyle -import textwrap -from unittest import mock - -from senlin.hacking import checks -from senlin.tests.unit.common import base - - -class HackingTestCase(base.SenlinTestCase): - @mock.patch('pycodestyle._checks', - {'physical_line': {}, 'logical_line': {}, 'tree': {}}) - def _run_check(self, code, checker, filename=None): - pycodestyle.register_check(checker) - - lines = textwrap.dedent(code).strip().splitlines(True) - - checker = pycodestyle.Checker(filename=filename, lines=lines) - checker.check_all() - checker.report._deferred_print.sort() - return checker.report._deferred_print - - def _assert_has_errors(self, code, checker, expected_errors=None, - filename=None): - actual_errors = [e[:3] for e in - self._run_check(code, checker, filename)] - self.assertEqual(expected_errors or [], actual_errors) - - def _assert_has_no_errors(self, code, checker, filename=None): - self._assert_has_errors(code, checker, filename=filename) - - def test_assert_equal_none(self): - self.assertEqual(1, len(list(checks.assert_equal_none( - "self.assertEqual(A, None)")))) - - self.assertEqual(1, len(list(checks.assert_equal_none( - "self.assertEqual(None, A)")))) - - self.assertEqual(0, len(list(checks.assert_equal_none( - "self.assertIsNone()")))) - - def test_use_jsonutils(self): - def __get_msg(fun): - msg = ("S319: jsonutils.%(fun)s must be used instead of " - "json.%(fun)s" % {'fun': fun}) - return [(0, msg)] - - for method in ('dump', 'dumps', 'load', 'loads'): - self.assertEqual(__get_msg(method), list(checks.use_jsonutils( - "json.%s(" % method, "./senlin/engine/cluster.py"))) - self.assertEqual(0, len(list(checks.use_jsonutils( - "jsonx.%s(" % method, "./senlin/engine/cluster.py")))) - self.assertEqual(0, len(list(checks.use_jsonutils( - "json.dumb", "./senlin/engine/cluster.py")))) - - def test_no_mutable_default_args(self): - self.assertEqual(1, len(list(checks.no_mutable_default_args( - "def create_cluster(mapping={}, **params)")))) - - self.assertEqual(0, len(list(checks.no_mutable_default_args( - "defined = []")))) - - self.assertEqual(0, len(list(checks.no_mutable_default_args( - "defined, undefined = [], {}")))) - - def test_api_version_decorator(self): - code = """ - @some_other_decorator - @wsgi.api_version("2.2") - def my_method(): - pass - """ - - actual_error = self._run_check(code, - checks.check_api_version_decorator)[0] - - self.assertEqual(2, actual_error[0]) - self.assertEqual(0, actual_error[1]) - self.assertEqual('S321', actual_error[2]) - self.assertEqual(' The api_version decorator must be the first ' - 'decorator on a method.', - actual_error[3]) - - def test_api_version_decorator_good(self): - code = """ - class SomeController(): - @wsgi.api_version("2.2") - def my_method(): - pass - - """ - - actual_error = self._run_check(code, - checks.check_api_version_decorator) - self.assertEqual(0, len(actual_error)) - - def test_no_log_warn(self): - code = """ - LOG.warn("LOG.warn is deprecated") - """ - errors = [(1, 0, 'S322')] - self._assert_has_errors(code, checks.no_log_warn, - expected_errors=errors) - code = """ - LOG.warning("LOG.warn is deprecated") - """ - self._assert_has_no_errors(code, checks.no_log_warn) - - def test_assert_equal_true(self): - test_value = True - self.assertEqual(0, len(list(checks.assert_equal_true( - "assertTrue(True)")))) - self.assertEqual(1, len(list(checks.assert_equal_true( - "assertEqual(True, %s)" % test_value)))) - self.assertEqual(1, len(list(checks.assert_equal_true( - "assertEqual(%s, True)" % test_value)))) diff --git 
a/senlin/tests/unit/test_rpc_client.py b/senlin/tests/unit/test_rpc_client.py deleted file mode 100644 index 44ce4ee12..000000000 --- a/senlin/tests/unit/test_rpc_client.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit Tests for senlin.rpc.client -""" -from unittest import mock - -from senlin.common import messaging -from senlin.rpc import client as rpc_client -from senlin.tests.unit.common import base -from senlin.tests.unit.common import utils - - -class EngineRpcAPITestCase(base.SenlinTestCase): - - def setUp(self): - messaging.setup("fake://", optional=True) - self.addCleanup(messaging.cleanup) - self.context = utils.dummy_context() - - # self.stubs = stubout.StubOutForTesting() - self.rpcapi = rpc_client.EngineClient() - super(EngineRpcAPITestCase, self).setUp() - - @mock.patch.object(messaging, 'get_rpc_client') - def test_call(self, mock_client): - client = mock.Mock() - mock_client.return_value = client - - method = 'fake_method' - req = mock.Mock() - rpcapi = rpc_client.EngineClient() - - # with no version - res = rpcapi.call(self.context, method, req) - - self.assertEqual(client, rpcapi._client) - client.call.assert_called_once_with(self.context, 'fake_method', - req=req) - self.assertEqual(res, client.call.return_value) - - @mock.patch.object(messaging, 'get_rpc_client') - def test_call_with_version(self, mock_client): - client = mock.Mock() - mock_client.return_value = client - - method = 'fake_method' - req = mock.Mock() - rpcapi = rpc_client.EngineClient() - - # with version - res = rpcapi.call(self.context, method, req, version='123') - - rpcapi._client.prepare.assert_called_once_with(version='123') - new_client = client.prepare.return_value - new_client.call.assert_called_once_with(self.context, 'fake_method', - req=req) - self.assertEqual(res, new_client.call.return_value) - - @mock.patch.object(messaging, 'get_rpc_client') - def test_cast(self, mock_client): - client = mock.Mock() - mock_client.return_value = client - - method = 'fake_method' - kwargs = {'key': 'value'} - rpcapi = rpc_client.EngineClient() - msg = rpcapi.make_msg(method, **kwargs) - - # with no version - res = rpcapi.cast(self.context, msg) - - self.assertEqual(client, rpcapi._client) - client.cast.assert_called_once_with(self.context, 'fake_method', - key='value') - self.assertEqual(res, client.cast.return_value) - - # with version - res = rpcapi.cast(self.context, msg, version='123') - client.prepare.assert_called_once_with(version='123') - new_client = client.prepare.return_value - new_client.cast.assert_called_once_with(self.context, 'fake_method', - key='value') - self.assertEqual(res, new_client.cast.return_value) - - def _test_engine_api(self, method, rpc_method, **kwargs): - ctxt = utils.dummy_context() - expected_retval = 'foo' if method == 'call' else None - - kwargs.pop('version', None) - - if 'expected_message' in kwargs: - expected_message = kwargs['expected_message'] - del kwargs['expected_message'] - else: - expected_message = 
self.rpcapi.make_msg(method, **kwargs) - - cast_and_call = [ - 'profile_delete', - 'policy_delete', - 'cluster_delete', - 'node_delete', - 'receiver_delete', - 'webhook_delete', - ] - - if rpc_method == 'call' and method in cast_and_call: - kwargs['cast'] = False - - mock_rpc_method = self.patchobject(self.rpcapi, rpc_method, - return_value=expected_retval) - - retval = getattr(self.rpcapi, method)(ctxt, **kwargs) - - self.assertEqual(expected_retval, retval) - expected_args = [ctxt, expected_message, mock.ANY] - actual_args, _ = mock_rpc_method.call_args - for expected_arg, actual_arg in zip(expected_args, actual_args): - self.assertEqual(expected_arg, actual_arg) diff --git a/senlin/version.py b/senlin/version.py deleted file mode 100644 index 848a22fce..000000000 --- a/senlin/version.py +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import pbr.version - -version_info = pbr.version.VersionInfo('senlin') -version_string = version_info.version_string diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 27471992a..000000000 --- a/setup.cfg +++ /dev/null @@ -1,81 +0,0 @@ -[metadata] -name = senlin -summary = OpenStack Clustering -description_file = - README.rst -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/senlin/latest/ -python_requires = >=3.8 -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.8 - -[files] -packages = - senlin -data_files = - etc/senlin = - etc/senlin/api-paste.ini - -[entry_points] -console_scripts = - senlin-api = senlin.cmd.api:main - senlin-conductor = senlin.cmd.conductor:main - senlin-engine = senlin.cmd.engine:main - senlin-health-manager = senlin.cmd.health_manager:main - senlin-manage = senlin.cmd.manage:main - senlin-status = senlin.cmd.status:main - -wsgi_scripts = - senlin-wsgi-api = senlin.cmd.api_wsgi:init_app - -oslo.config.opts = - senlin.conf = senlin.conf.opts:list_opts - -oslo.config.opts.defaults = - senlin.conf = senlin.common.config:set_config_defaults - -oslo.policy.policies = - senlin = senlin.common.policies:list_rules - -senlin.drivers = - openstack = senlin.drivers.os - openstack_test = senlin.tests.drivers.os_test - -senlin.profiles = - os.heat.stack-1.0 = senlin.profiles.os.heat.stack:StackProfile - os.nova.server-1.0 = senlin.profiles.os.nova.server:ServerProfile - container.dockerinc.docker-1.0 = senlin.profiles.container.docker:DockerProfile - -senlin.policies = - senlin.policy.deletion-1.0 = senlin.policies.deletion_policy:DeletionPolicy - senlin.policy.deletion-1.1 = senlin.policies.deletion_policy:DeletionPolicy - senlin.policy.scaling-1.0 = senlin.policies.scaling_policy:ScalingPolicy - 
senlin.policy.health-1.0 = senlin.policies.health_policy:HealthPolicy - senlin.policy.health-1.1 = senlin.policies.health_policy:HealthPolicy - senlin.policy.health-1.2 = senlin.policies.health_policy:HealthPolicy - senlin.policy.loadbalance-1.0 = senlin.policies.lb_policy:LoadBalancingPolicy - senlin.policy.loadbalance-1.1 = senlin.policies.lb_policy:LoadBalancingPolicy - senlin.policy.loadbalance-1.2 = senlin.policies.lb_policy:LoadBalancingPolicy - senlin.policy.loadbalance-1.3 = senlin.policies.lb_policy:LoadBalancingPolicy - senlin.policy.region_placement-1.0 = senlin.policies.region_placement:RegionPlacementPolicy - senlin.policy.zone_placement-1.0 = senlin.policies.zone_placement:ZonePlacementPolicy - senlin.policy.affinity-1.0 = senlin.policies.affinity_policy:AffinityPolicy - senlin.policy.batch-1.0 = senlin.policies.batch_policy:BatchPolicy - -senlin.dispatchers = - database = senlin.events.database:DBEvent - message = senlin.events.message:MessageEvent - -senlin.endpoints = - heat = senlin.engine.notifications.heat_endpoint:HeatNotificationEndpoint - nova = senlin.engine.notifications.nova_endpoint:NovaNotificationEndpoint - diff --git a/setup.py b/setup.py deleted file mode 100644 index f63cc23c5..000000000 --- a/setup.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 3251ca64a..000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,16 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -# Hacking already pins down pep8, pyflakes and flake8 -hacking>=3.0.1,<3.1.0 # Apache-2.0 -coverage>=4.5.1 # Apache-2.0 -oslotest>=3.3.0 # Apache-2.0 -stestr>=2.0.0 # Apache-2.0 -PyMySQL>=0.8.0 # MIT License -tempest>=21.0.0 # Apache-2.0 -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=2.2.0 # MIT - -# Bandit build requirements -bandit>=1.1.0 # Apache-2.0 diff --git a/tools/README.rst b/tools/README.rst deleted file mode 100644 index afaec1630..000000000 --- a/tools/README.rst +++ /dev/null @@ -1,60 +0,0 @@ - -Files in this directory are tools for developers or for helping users install -the senlin software. - --------- -Contents --------- - -``config-generator.conf`` - - This is a configuration for the oslo-config-generator tool to create an - initial `senlin.conf.sample` file. When installing senlin manually, the - generated file can be copied to `/etc/senlin/senlin.conf` with customized - settings. - - -``gen-config`` - - This is a wrapper of the oslo-config-generator tool that generates a config - file for senlin. 
The correct way to use it is:: - - cd /opt/stack/senlin - tools/gen-config - - Another way to generate a sample configuration file is:: - - cd /opt/stack/senlin - tox -e genconfig - - -``gen-pot-files`` - - This is a script for extracting strings from source code into a POT file, - which serves as the basis for generating translations for different languages. - - -``senlin-db-recreate`` - - This script drops the `senlin` database in MySQL when the database is corrupted. - - **Warning** - Be sure to change the 'MYSQL_ROOT_PW' and 'MYSQL_SENLIN_PW' before running - this script. - - -``setup-service`` - - This is a script for setting up the ``senlin`` service. You will need to - provide the host IP address and the service password for the ``senlin`` - user to be created. For example:: - - cd /opt/stack/senlin/tools - ./setup-service 192.168.52.5 TopSecrete - - **NOTE** - You need to have some environment variables properly set so that you are - the ``admin`` user for setting up the ``senlin`` service. For example:: - - cd $HOME - . devstack/openrc admin diff --git a/tools/config-generator.conf b/tools/config-generator.conf deleted file mode 100644 index 9163f86df..000000000 --- a/tools/config-generator.conf +++ /dev/null @@ -1,15 +0,0 @@ -[DEFAULT] -output_file = etc/senlin/senlin.conf.sample -wrap_width = 119 -namespace = senlin.conf -namespace = keystonemiddleware.auth_token -namespace = oslo.db -namespace = oslo.log -namespace = oslo.messaging -namespace = oslo.middleware.cors -namespace = oslo.middleware.http_proxy_to_wsgi -namespace = oslo.policy -namespace = oslo.service.periodic_task -namespace = oslo.service.service -namespace = oslo.service.sslutils -namespace = osprofiler diff --git a/tools/gen-config b/tools/gen-config deleted file mode 100755 index 61eb6b335..000000000 --- a/tools/gen-config +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -oslo-config-generator --config-file=tools/config-generator.conf diff --git a/tools/gen-policy b/tools/gen-policy deleted file mode 100755 index dbfe1252f..000000000 --- a/tools/gen-policy +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -oslopolicy-sample-generator --config-file tools/policy-generator.conf diff --git a/tools/gen-pot-files b/tools/gen-pot-files deleted file mode 100755 index fd44cffa1..000000000 --- a/tools/gen-pot-files +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -python setup.py extract_messages --input-dirs "senlin/api,senlin/cmd,senlin/common,senlin/db,senlin/drivers,senlin/engine,senlin/policies,senlin/profiles,senlin/webhooks,senlin/openstack" diff --git a/tools/policy-generator.conf b/tools/policy-generator.conf deleted file mode 100644 index 411a51a46..000000000 --- a/tools/policy-generator.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -output_file = etc/senlin/policy.yaml.sample -namespace = senlin \ No newline at end of file diff --git a/tools/senlin-db-recreate b/tools/senlin-db-recreate deleted file mode 100755 index a64826f16..000000000 --- a/tools/senlin-db-recreate +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -MYSQL_ROOT_PW=${MYSQL_ROOT_PW:-openstack} -MYSQL_SENLIN_PW=${MYSQL_SENLIN_PW:-openstack} - -echo "Recreating 'senlin' database."
-cat << EOF | mysql -u root --password=${MYSQL_ROOT_PW} -DROP DATABASE IF EXISTS senlin; -CREATE DATABASE senlin DEFAULT CHARACTER SET utf8; -GRANT ALL ON senlin.* TO 'senlin'@'localhost' IDENTIFIED BY '${MYSQL_SENLIN_PW}'; -GRANT ALL ON senlin.* TO 'senlin'@'%' IDENTIFIED BY '${MYSQL_SENLIN_PW}'; -flush privileges; -EOF - -senlin-manage db_sync diff --git a/tools/setup-service b/tools/setup-service deleted file mode 100755 index d214a002c..000000000 --- a/tools/setup-service +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash - -if [[ -z $OS_AUTH_URL ]]; then - echo "This script must have proper environment variables exported. " - echo "Please check if you have sourced senlinrc file or openrc file if " - echo "you are using devstack." - exit -1 -fi - -if [ $OS_USERNAME != 'admin' ]; then - echo "This script has to be executed as an 'admin' user. " - echo "Please set environment variable OS_USERNAME to 'admin'." - exit -1 -fi - -if [ $# -ne 2 ]; then - echo "Usage: `basename $0` <host-ip> <service-password>" - exit -1 -fi - -PORT=8777 -HOST=$1 # Put your host IP here -SVC_PASSWD=$2 -OS_REGION_NAME=${OS_REGION_NAME:-RegionOne} -OS_IDENTITY_API_VERSION=${OS_IDENTITY_API_VERSION:-3} -SERVICE_PROJECT=${OS_SERVICE_PROJECT:-service} -SERVICE_ROLE=${OS_SERVICE_ROLE:-service} - -SERVICE_ID=$(openstack service show senlin -f value -cid 2>/dev/null) -if [[ -z $SERVICE_ID ]]; then - SERVICE_ID=$(openstack service create \ - --name senlin \ - --description 'Senlin Clustering Service V1' \ - -f value -cid \ - clustering) -fi - -if [[ -z $SERVICE_ID ]]; then - exit -fi - -if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then - openstack endpoint create senlin admin "http://$HOST:$PORT" \ - --region $OS_REGION_NAME - openstack endpoint create senlin public "http://$HOST:$PORT" \ - --region $OS_REGION_NAME - openstack endpoint create senlin internal "http://$HOST:$PORT" \ - --region $OS_REGION_NAME -else - openstack endpoint create \ - --adminurl "http://$HOST:$PORT" \ - --publicurl "http://$HOST:$PORT" \ - --internalurl "http://$HOST:$PORT" \ - --region $OS_REGION_NAME \ - senlin -fi - -# Check service project name. -# Devstack uses 'service' while some distributions use 'services' -PROJECT_ID=$(openstack project show service -f value -cid 2>/dev/null) -if [[ -z $PROJECT_ID ]]; then - SERVICE_PROJECT=services - SERVICE_ROLE=services - openstack role create $SERVICE_ROLE -fi - -openstack user create \ - --password "$SVC_PASSWD" \ - --project $SERVICE_PROJECT \ - --email senlin@localhost \ - senlin - -openstack role add \ - admin \ - --user senlin \ - --project $SERVICE_PROJECT - - # make sure 'senlin' has service role on 'demo' project -openstack role add \ - $SERVICE_ROLE \ - --user senlin \ - --project demo diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 036f1f737..000000000 --- a/tox.ini +++ /dev/null @@ -1,120 +0,0 @@ -[tox] -minversion = 3.1.1 -envlist = py3,pep8 -ignore_basepython_conflict = True - -[testenv] -basepython = python3 -setenv = - VIRTUAL_ENV={envdir} - LANGUAGE=C.UTF-8 - LC_ALL=C.UTF-8 - OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} - OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} - OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} - OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:180} - PYTHONDONTWRITEBYTECODE=1 - PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site -passenv = - http_proxy - HTTP_PROXY - https_proxy - HTTPS_PROXY - no_proxy - NO_PROXY - OS_DEBUG -usedevelop = True -install_command = - pip install {opts} {packages} -commands = - find .
-type f -name "*.py[c|o]" -delete - stestr run {posargs} - stestr slowest -deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -allowlist_externals = - bash - find - rm - -[testenv:debug] -basepython = python3 -commands = oslo_debug_helper -t senlin/tests/unit {posargs} - -[testenv:pep8] -commands = - flake8 senlin doc/source/ext - -[testenv:genconfig] -envdir = {toxworkdir}/venv -commands = - {toxinidir}/tools/gen-config - -[testenv:genpolicy] -envdir = {toxworkdir}/venv -commands = - {toxinidir}/tools/gen-policy - -[testenv:venv] -commands = {posargs} - -[testenv:cover] -setenv = - {[testenv]setenv} - PYTHON=coverage run --source senlin --parallel-mode -commands = - coverage erase - find . -type f -name "*.pyc" -delete - stestr run --no-subunit-trace {posargs} - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report --fail-under=90 --skip-covered - -[testenv:docs] -deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} - -r{toxinidir}/requirements.txt - -r{toxinidir}/doc/requirements.txt -commands = - rm -rf doc/build - sphinx-build -W --keep-going -b html doc/source doc/build/html -allowlist_externals = rm - -[testenv:releasenotes] -deps = -r{toxinidir}/doc/requirements.txt -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html - -[testenv:api-ref] -deps = -r{toxinidir}/doc/requirements.txt -commands = - sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html - -[flake8] -# Temporarily disable complaints about docstring for public module/class/method -# H106 Don't put vim configuration in source files -# H203 Use assertIs(Not)None to check for None -ignore = D100,D101,D102,D103,D104,D105,D200,D201,D202,D204,D205,D300,D301,D400,D401,I100,I201,W504,W605 -enable-extensions=H106,H203,H204,H205 -show-source = true -exclude=.venv,.git,.tox,cover,dist,*lib/python*,*egg,tools,build,releasenotes -max-complexity=20 - -[hacking] -import_exceptions = senlin.common.i18n - -[flake8:local-plugins] -extension = - S318 = checks:assert_equal_none - S319 = checks:use_jsonutils - S320 = checks:no_mutable_default_args - S321 = checks:check_api_version_decorator - S322 = checks:no_log_warn - S323 = checks:assert_equal_true -paths = ./senlin/hacking - -[testenv:bandit] -deps = -r{toxinidir}/test-requirements.txt -commands = bandit -r senlin -x tests -s B101,B104,B110,B310,B311,B506 diff --git a/uninstall.sh b/uninstall.sh deleted file mode 100755 index 97f661a33..000000000 --- a/uninstall.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -if [ $EUID -ne 0 ]; then - echo "This script must be run as root." - exit -fi - -type -P pip-python &> /dev/null && have_pip_python=1 || have_pip_python=0 -if [ $have_pip_python -eq 1 ]; then - pip-python uninstall -y senlin - exit -fi - -type -P pip &> /dev/null && have_pip=1 || have_pip=0 -if [ $have_pip -eq 1 ]; then - pip uninstall -y senlin - exit -fi - -echo "pip-python not found. install package (probably python-pip) or run -'easy_install pip', then rerun $0";
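For reference, the ``[flake8:local-plugins]`` table in the deleted tox.ini maps the S3xx codes exercised by test_hacking.py to generator functions in ``senlin/hacking/checks.py``. Each check is a small generator over logical lines. A minimal sketch of one such check (S322, flagging the deprecated ``LOG.warn``), registered via hacking's ``flake8ext`` decorator; the exact matching logic Senlin used may differ::

    import re

    from hacking import core

    LOG_WARN_RE = re.compile(r"LOG\.warn\(")


    @core.flake8ext
    def no_log_warn(logical_line):
        """S322 - Use LOG.warning() instead of the deprecated LOG.warn()."""
        if LOG_WARN_RE.search(logical_line):
            yield (0, "S322: Use LOG.warning() rather than LOG.warn()")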